2024-12-13 21:30:13,933 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-13 21:30:13,944 main DEBUG Took 0.009197 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-13 21:30:13,944 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-13 21:30:13,945 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-13 21:30:13,946 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-13 21:30:13,947 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 21:30:13,954 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-13 21:30:13,964 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,966 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 21:30:13,966 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,966 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 21:30:13,967 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,967 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 21:30:13,968 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,968 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 21:30:13,968 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,969 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 21:30:13,969 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,969 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 21:30:13,970 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,970 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-13 21:30:13,970 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,971 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 21:30:13,971 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,971 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 21:30:13,971 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,972 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 21:30:13,972 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,972 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 21:30:13,973 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,973 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 21:30:13,973 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,973 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-13 21:30:13,975 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 21:30:13,976 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-13 21:30:13,977 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-13 21:30:13,978 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-13 21:30:13,979 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-13 21:30:13,979 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-13 21:30:13,986 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-13 21:30:13,989 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-13 21:30:13,990 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-13 21:30:13,991 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-13 21:30:13,991 main DEBUG createAppenders(={Console}) 2024-12-13 21:30:13,992 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-13 21:30:13,992 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-13 21:30:13,992 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-13 21:30:13,993 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-13 21:30:13,993 main DEBUG OutputStream closed 2024-12-13 21:30:13,993 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-13 21:30:13,993 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-13 21:30:13,994 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-13 21:30:14,056 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-13 21:30:14,058 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-13 21:30:14,059 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-13 21:30:14,060 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-13 21:30:14,061 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-13 21:30:14,061 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-13 21:30:14,061 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-13 21:30:14,062 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-13 21:30:14,062 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-13 21:30:14,062 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-13 21:30:14,062 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-13 21:30:14,063 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-13 21:30:14,063 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-13 21:30:14,063 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-13 21:30:14,063 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-13 21:30:14,064 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-13 21:30:14,064 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-13 21:30:14,065 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-13 21:30:14,067 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-13 21:30:14,067 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-13 21:30:14,067 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-13 21:30:14,068 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-13T21:30:14,269 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4 2024-12-13 21:30:14,271 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-13 21:30:14,271 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
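Up to this point the log is just Log4j 2 assembling the test logging configuration from the log4j2.properties bundled in the hbase-logging test jar: per-package levels, an INFO root logger, and a console appender on SYSTEM_ERR using the pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n. Purely as an illustrative sketch, a roughly equivalent configuration can be built programmatically with Log4j's ConfigurationBuilder; the class name Log4jTestConfigSketch is invented here, the stock Console appender stands in for HBase's internal HBaseTestAppender, and only a few of the per-package levels are reproduced:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.core.config.Configurator;
    import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
    import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

    public class Log4jTestConfigSketch {
      public static void main(String[] args) {
        ConfigurationBuilder<BuiltConfiguration> builder =
            ConfigurationBuilderFactory.newConfigurationBuilder();

        // Console appender on stderr with the same layout pattern the test config uses.
        AppenderComponentBuilder console = builder.newAppender("Console", "Console")
            .addAttribute("target", "SYSTEM_ERR")
            .add(builder.newLayout("PatternLayout")
                .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
        builder.add(console);

        // A few of the per-package levels that appear in the LoggerConfig$Builder lines above.
        builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
        builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
        builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));

        // Root logger at INFO, routed to the console appender ("INFO,Console" above).
        builder.add(builder.newRootLogger(Level.INFO).add(builder.newAppenderRef("Console")));

        Configurator.initialize(builder.build());
        LogManager.getLogger("org.apache.hadoop.hbase").debug("log4j2 configured");
      }
    }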
2024-12-13T21:30:14,279 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-13T21:30:14,295 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-13T21:30:14,298 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/cluster_6d61a109-3f65-6911-b45f-66524476d70f, deleteOnExit=true 2024-12-13T21:30:14,298 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-13T21:30:14,299 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/test.cache.data in system properties and HBase conf 2024-12-13T21:30:14,300 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/hadoop.tmp.dir in system properties and HBase conf 2024-12-13T21:30:14,301 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/hadoop.log.dir in system properties and HBase conf 2024-12-13T21:30:14,302 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-13T21:30:14,302 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-13T21:30:14,303 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-13T21:30:14,391 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-13T21:30:14,480 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-13T21:30:14,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-13T21:30:14,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-13T21:30:14,484 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-13T21:30:14,484 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T21:30:14,484 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-13T21:30:14,485 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-13T21:30:14,485 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T21:30:14,485 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T21:30:14,486 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-13T21:30:14,486 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/nfs.dump.dir in system properties and HBase conf 2024-12-13T21:30:14,486 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/java.io.tmpdir in system properties and HBase conf 2024-12-13T21:30:14,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T21:30:14,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-13T21:30:14,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-13T21:30:15,433 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-13T21:30:15,512 INFO [Time-limited test {}] log.Log(170): Logging initialized @2446ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-13T21:30:15,577 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T21:30:15,633 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T21:30:15,650 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T21:30:15,650 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T21:30:15,651 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T21:30:15,662 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T21:30:15,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73882ca4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/hadoop.log.dir/,AVAILABLE} 2024-12-13T21:30:15,665 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@588be694{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T21:30:15,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f0d4558{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/java.io.tmpdir/jetty-localhost-39829-hadoop-hdfs-3_4_1-tests_jar-_-any-14555271160277793182/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-13T21:30:15,835 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a299586{HTTP/1.1, (http/1.1)}{localhost:39829} 2024-12-13T21:30:15,835 INFO [Time-limited test {}] server.Server(415): Started @2770ms 2024-12-13T21:30:16,281 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T21:30:16,289 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T21:30:16,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T21:30:16,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T21:30:16,290 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T21:30:16,291 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57582772{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/hadoop.log.dir/,AVAILABLE} 2024-12-13T21:30:16,291 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63d4d645{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T21:30:16,399 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bd2e890{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/java.io.tmpdir/jetty-localhost-43231-hadoop-hdfs-3_4_1-tests_jar-_-any-15627889599160305236/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T21:30:16,400 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d3fa6ef{HTTP/1.1, (http/1.1)}{localhost:43231} 2024-12-13T21:30:16,400 INFO [Time-limited test {}] server.Server(415): Started @3335ms 2024-12-13T21:30:16,449 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T21:30:17,218 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/cluster_6d61a109-3f65-6911-b45f-66524476d70f/dfs/data/data2/current/BP-1981329684-172.17.0.3-1734125415023/current, will proceed with Du for space computation calculation, 2024-12-13T21:30:17,218 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/cluster_6d61a109-3f65-6911-b45f-66524476d70f/dfs/data/data1/current/BP-1981329684-172.17.0.3-1734125415023/current, will proceed with Du for space computation calculation, 2024-12-13T21:30:17,243 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-13T21:30:17,283 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94a921ec8cbf155 with lease ID 0x67f0da7b8e258cb8: Processing first storage report for DS-24f5cc3e-bc55-4040-a067-abed7c73186e from datanode DatanodeRegistration(127.0.0.1:46537, datanodeUuid=6c32d6a2-6df5-4930-9540-0263abbb0f59, infoPort=33127, infoSecurePort=0, ipcPort=46535, storageInfo=lv=-57;cid=testClusterID;nsid=283748956;c=1734125415023) 2024-12-13T21:30:17,285 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94a921ec8cbf155 with lease ID 0x67f0da7b8e258cb8: from storage DS-24f5cc3e-bc55-4040-a067-abed7c73186e node DatanodeRegistration(127.0.0.1:46537, datanodeUuid=6c32d6a2-6df5-4930-9540-0263abbb0f59, infoPort=33127, infoSecurePort=0, ipcPort=46535, storageInfo=lv=-57;cid=testClusterID;nsid=283748956;c=1734125415023), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-13T21:30:17,285 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94a921ec8cbf155 with lease ID 0x67f0da7b8e258cb8: Processing first storage report for DS-8bb4564a-da92-4355-85b5-2d80dffc3877 from datanode DatanodeRegistration(127.0.0.1:46537, datanodeUuid=6c32d6a2-6df5-4930-9540-0263abbb0f59, infoPort=33127, infoSecurePort=0, ipcPort=46535, storageInfo=lv=-57;cid=testClusterID;nsid=283748956;c=1734125415023) 2024-12-13T21:30:17,285 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94a921ec8cbf155 with lease ID 0x67f0da7b8e258cb8: from storage DS-8bb4564a-da92-4355-85b5-2d80dffc3877 node DatanodeRegistration(127.0.0.1:46537, datanodeUuid=6c32d6a2-6df5-4930-9540-0263abbb0f59, infoPort=33127, infoSecurePort=0, ipcPort=46535, storageInfo=lv=-57;cid=testClusterID;nsid=283748956;c=1734125415023), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-13T21:30:17,357 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4 
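The entries above show HBaseTestingUtility standing up the requested mini cluster (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1, ...}): test directories are pushed into the Hadoop/HBase conf, Jetty serves the namenode and datanode web apps, and the datanode files its first block reports. As a minimal sketch of how a test class typically drives this startup (the real TestAcidGuaranteesWithAdaptivePolicy adds its own table and workload setup; the class name MiniClusterSetupSketch is invented):

    import org.apache.hadoop.hbase.HBaseClassTestRule;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.ClassRule;

    public class MiniClusterSetupSketch {
      // Enforces the per-class timeout reported as "timeout: 13 mins" above.
      @ClassRule
      public static final HBaseClassTestRule CLASS_RULE =
          HBaseClassTestRule.forClass(MiniClusterSetupSketch.class);

      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUpCluster() throws Exception {
        // Mirrors the option printed above: one master, one regionserver,
        // one datanode and one ZooKeeper server, no pre-created root/WAL dirs.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .numZkServers(1)
            .build();
        UTIL.startMiniCluster(option);
      }

      @AfterClass
      public static void tearDownCluster() throws Exception {
        UTIL.shutdownMiniCluster();
      }
    }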
2024-12-13T21:30:17,420 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/cluster_6d61a109-3f65-6911-b45f-66524476d70f/zookeeper_0, clientPort=57927, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/cluster_6d61a109-3f65-6911-b45f-66524476d70f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/cluster_6d61a109-3f65-6911-b45f-66524476d70f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-13T21:30:17,430 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=57927 2024-12-13T21:30:17,438 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T21:30:17,441 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T21:30:17,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741825_1001 (size=7) 2024-12-13T21:30:18,054 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05 with version=8 2024-12-13T21:30:18,055 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/hbase-staging 2024-12-13T21:30:18,159 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-13T21:30:18,387 INFO [Time-limited test {}] client.ConnectionUtils(129): master/fd052dae32be:0 server-side Connection retries=45 2024-12-13T21:30:18,401 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T21:30:18,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T21:30:18,402 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T21:30:18,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T21:30:18,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T21:30:18,518 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T21:30:18,566 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-13T21:30:18,574 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-13T21:30:18,577 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T21:30:18,596 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 43194 (auto-detected) 2024-12-13T21:30:18,597 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-12-13T21:30:18,612 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.3:33659 2024-12-13T21:30:18,618 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T21:30:18,620 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T21:30:18,630 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:33659 connecting to ZooKeeper ensemble=127.0.0.1:57927 2024-12-13T21:30:18,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:336590x0, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T21:30:18,729 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33659-0x100214d103e0000 connected 2024-12-13T21:30:18,803 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T21:30:18,808 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T21:30:18,812 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T21:30:18,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33659 2024-12-13T21:30:18,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33659 2024-12-13T21:30:18,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33659 2024-12-13T21:30:18,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33659 2024-12-13T21:30:18,818 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33659 
2024-12-13T21:30:18,823 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05, hbase.cluster.distributed=false 2024-12-13T21:30:18,879 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/fd052dae32be:0 server-side Connection retries=45 2024-12-13T21:30:18,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T21:30:18,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T21:30:18,880 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T21:30:18,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T21:30:18,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T21:30:18,882 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T21:30:18,884 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T21:30:18,885 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.3:38989 2024-12-13T21:30:18,887 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-13T21:30:18,892 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-13T21:30:18,894 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T21:30:18,897 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T21:30:18,900 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:38989 connecting to ZooKeeper ensemble=127.0.0.1:57927 2024-12-13T21:30:18,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:389890x0, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T21:30:18,909 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:389890x0, quorum=127.0.0.1:57927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T21:30:18,909 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38989-0x100214d103e0001 connected 2024-12-13T21:30:18,910 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T21:30:18,911 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T21:30:18,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38989 2024-12-13T21:30:18,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38989 2024-12-13T21:30:18,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38989 2024-12-13T21:30:18,915 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38989 2024-12-13T21:30:18,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38989 2024-12-13T21:30:18,918 INFO [master/fd052dae32be:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/fd052dae32be,33659,1734125418153 2024-12-13T21:30:18,931 DEBUG [M:0;fd052dae32be:33659 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;fd052dae32be:33659 2024-12-13T21:30:18,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T21:30:18,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T21:30:18,935 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/fd052dae32be,33659,1734125418153 2024-12-13T21:30:18,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T21:30:18,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T21:30:18,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:18,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:18,959 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-13T21:30:18,959 INFO [master/fd052dae32be:0:becomeActiveMaster {}] 
master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/fd052dae32be,33659,1734125418153 from backup master directory 2024-12-13T21:30:18,960 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-13T21:30:18,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/fd052dae32be,33659,1734125418153 2024-12-13T21:30:18,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T21:30:18,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T21:30:18,967 WARN [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-13T21:30:18,967 INFO [master/fd052dae32be:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=fd052dae32be,33659,1734125418153 2024-12-13T21:30:18,969 INFO [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-13T21:30:18,971 INFO [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-13T21:30:19,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741826_1002 (size=42) 2024-12-13T21:30:19,437 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/hbase.id with ID: c4ee86ea-8e27-4e90-a477-1845d6081842 2024-12-13T21:30:19,479 INFO [master/fd052dae32be:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T21:30:19,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:19,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:19,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741827_1003 (size=196) 2024-12-13T21:30:19,979 INFO [master/fd052dae32be:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T21:30:19,982 INFO [master/fd052dae32be:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-13T21:30:19,996 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:20,000 INFO [master/fd052dae32be:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-13T21:30:20,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741828_1004 (size=1189) 2024-12-13T21:30:20,449 INFO [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store 2024-12-13T21:30:20,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741829_1005 (size=34) 2024-12-13T21:30:20,868 INFO [master/fd052dae32be:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
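The 'master:store' descriptor created above pairs an in-memory, multi-version 'info' family (ROW_INDEX_V1 encoding, ROWCOL bloom filter, 8 KB blocks) with three plain families ('proc', 'rs', 'state': one version, ROW bloom filter, 64 KB blocks). That descriptor is assembled internally by the master region code; the sketch below only shows how the same family settings map onto the public ColumnFamilyDescriptorBuilder/TableDescriptorBuilder API (the class name StoreDescriptorSketch is invented, and only two of the four families are spelled out; 'rs' and 'state' would look like 'proc'):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor build() {
        // 'info' family as printed above: 3 versions, in-memory, ROW_INDEX_V1, ROWCOL bloom, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build();

        // 'proc' family: single version, ROW bloom filter, 64 KB blocks.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();

        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }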
2024-12-13T21:30:20,869 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:30:20,870 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-13T21:30:20,870 INFO [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T21:30:20,870 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T21:30:20,871 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-13T21:30:20,871 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T21:30:20,871 INFO [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T21:30:20,871 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-13T21:30:20,873 WARN [master/fd052dae32be:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/.initializing 2024-12-13T21:30:20,874 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/WALs/fd052dae32be,33659,1734125418153 2024-12-13T21:30:20,881 INFO [master/fd052dae32be:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-13T21:30:20,891 INFO [master/fd052dae32be:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fd052dae32be%2C33659%2C1734125418153, suffix=, logDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/WALs/fd052dae32be,33659,1734125418153, archiveDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/oldWALs, maxLogs=10 2024-12-13T21:30:20,908 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/WALs/fd052dae32be,33659,1734125418153/fd052dae32be%2C33659%2C1734125418153.1734125420895, exclude list is [], retry=0 2024-12-13T21:30:20,923 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46537,DS-24f5cc3e-bc55-4040-a067-abed7c73186e,DISK] 2024-12-13T21:30:20,926 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
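The WAL setup above instantiates AsyncFSWALProvider and reports its roll policy: blocksize=256 MB, rollsize=128 MB, maxLogs=10. In this run those appear to be the defaults derived from the HDFS block size, but the same knobs are exposed as ordinary configuration keys; a hedged sketch of setting them explicitly (the class name WalConfigSketch is invented):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static Configuration walTuning() {
        Configuration conf = HBaseConfiguration.create();
        // Ask for the async WAL implementation the log shows being loaded.
        conf.set("hbase.wal.provider", "asyncfs");
        // 256 MB WAL blocks, rolled at half a block (128 MB), keeping at most 10 logs,
        // matching the "WAL configuration" line above.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        conf.setInt("hbase.regionserver.maxlogs", 10);
        return conf;
      }
    }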
2024-12-13T21:30:20,957 INFO [master/fd052dae32be:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/WALs/fd052dae32be,33659,1734125418153/fd052dae32be%2C33659%2C1734125418153.1734125420895 2024-12-13T21:30:20,957 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33127:33127)] 2024-12-13T21:30:20,958 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-13T21:30:20,958 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:30:20,962 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T21:30:20,963 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T21:30:20,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T21:30:21,014 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-13T21:30:21,018 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:21,020 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T21:30:21,021 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T21:30:21,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-13T21:30:21,025 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:21,026 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:30:21,026 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T21:30:21,028 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-13T21:30:21,029 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:21,030 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:30:21,030 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T21:30:21,032 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-13T21:30:21,033 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:21,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:30:21,037 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-13T21:30:21,038 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-13T21:30:21,046 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-13T21:30:21,051 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T21:30:21,056 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T21:30:21,057 INFO [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62299213, jitterRate=-0.07166938483715057}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-13T21:30:21,061 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-13T21:30:21,062 INFO [master/fd052dae32be:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-13T21:30:21,086 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17a07dc1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:21,122 INFO [master/fd052dae32be:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-12-13T21:30:21,132 INFO [master/fd052dae32be:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-13T21:30:21,132 INFO [master/fd052dae32be:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-13T21:30:21,133 INFO [master/fd052dae32be:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-13T21:30:21,135 INFO [master/fd052dae32be:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-13T21:30:21,139 INFO [master/fd052dae32be:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 3 msec 2024-12-13T21:30:21,139 INFO [master/fd052dae32be:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-13T21:30:21,160 INFO [master/fd052dae32be:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-13T21:30:21,170 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-13T21:30:21,208 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-13T21:30:21,212 INFO [master/fd052dae32be:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-13T21:30:21,214 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-13T21:30:21,224 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-13T21:30:21,226 INFO [master/fd052dae32be:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-13T21:30:21,231 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-13T21:30:21,241 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-13T21:30:21,243 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-13T21:30:21,249 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-13T21:30:21,260 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-13T21:30:21,266 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-13T21:30:21,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T21:30:21,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T21:30:21,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:21,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:21,275 INFO [master/fd052dae32be:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=fd052dae32be,33659,1734125418153, sessionid=0x100214d103e0000, setting cluster-up flag (Was=false) 2024-12-13T21:30:21,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:21,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:21,324 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-13T21:30:21,327 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fd052dae32be,33659,1734125418153 2024-12-13T21:30:21,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:21,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:21,824 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-13T21:30:21,830 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fd052dae32be,33659,1734125418153 2024-12-13T21:30:21,905 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-12-13T21:30:21,911 INFO [master/fd052dae32be:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-13T21:30:21,913 INFO [master/fd052dae32be:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-13T21:30:21,917 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: fd052dae32be,33659,1734125418153 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-13T21:30:21,919 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/fd052dae32be:0, corePoolSize=5, maxPoolSize=5 2024-12-13T21:30:21,919 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/fd052dae32be:0, corePoolSize=5, maxPoolSize=5 2024-12-13T21:30:21,920 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/fd052dae32be:0, corePoolSize=5, maxPoolSize=5 2024-12-13T21:30:21,920 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/fd052dae32be:0, corePoolSize=5, maxPoolSize=5 2024-12-13T21:30:21,920 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/fd052dae32be:0, corePoolSize=10, maxPoolSize=10 2024-12-13T21:30:21,920 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/fd052dae32be:0, corePoolSize=1, maxPoolSize=1 2024-12-13T21:30:21,920 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/fd052dae32be:0, corePoolSize=2, maxPoolSize=2 2024-12-13T21:30:21,920 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/fd052dae32be:0, corePoolSize=1, maxPoolSize=1 2024-12-13T21:30:21,922 INFO [master/fd052dae32be:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734125451922 2024-12-13T21:30:21,923 INFO [master/fd052dae32be:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-13T21:30:21,924 INFO [master/fd052dae32be:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-13T21:30:21,925 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute 
pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-13T21:30:21,926 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-13T21:30:21,927 INFO [master/fd052dae32be:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-13T21:30:21,927 INFO [master/fd052dae32be:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-13T21:30:21,928 INFO [master/fd052dae32be:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-13T21:30:21,928 INFO [master/fd052dae32be:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-13T21:30:21,928 INFO [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:21,929 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:21,930 INFO [master/fd052dae32be:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-13T21:30:21,930 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-13T21:30:21,931 INFO [master/fd052dae32be:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-13T21:30:21,932 INFO [master/fd052dae32be:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-13T21:30:21,933 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;fd052dae32be:38989 2024-12-13T21:30:21,934 INFO [master/fd052dae32be:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-13T21:30:21,935 INFO [RS:0;fd052dae32be:38989 {}] 
regionserver.HRegionServer(1008): ClusterId : c4ee86ea-8e27-4e90-a477-1845d6081842 2024-12-13T21:30:21,935 INFO [master/fd052dae32be:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-13T21:30:21,939 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-13T21:30:21,940 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/fd052dae32be:0:becomeActiveMaster-HFileCleaner.large.0-1734125421937,5,FailOnTimeoutGroup] 2024-12-13T21:30:21,941 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/fd052dae32be:0:becomeActiveMaster-HFileCleaner.small.0-1734125421941,5,FailOnTimeoutGroup] 2024-12-13T21:30:21,941 INFO [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:21,941 INFO [master/fd052dae32be:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-13T21:30:21,943 INFO [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:21,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741831_1007 (size=1039) 2024-12-13T21:30:21,943 INFO [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:21,960 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-13T21:30:21,960 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-13T21:30:21,967 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-13T21:30:21,967 DEBUG [RS:0;fd052dae32be:38989 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50a679dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:21,969 DEBUG [RS:0;fd052dae32be:38989 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ec758f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fd052dae32be/172.17.0.3:0 2024-12-13T21:30:21,971 INFO [RS:0;fd052dae32be:38989 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-13T21:30:21,971 INFO [RS:0;fd052dae32be:38989 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-13T21:30:21,971 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-13T21:30:21,973 INFO [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(3073): reportForDuty to master=fd052dae32be,33659,1734125418153 with isa=fd052dae32be/172.17.0.3:38989, startcode=1734125418878 2024-12-13T21:30:21,984 DEBUG [RS:0;fd052dae32be:38989 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T21:30:22,015 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58719, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T21:30:22,022 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33659 {}] master.ServerManager(332): Checking decommissioned status of RegionServer fd052dae32be,38989,1734125418878 2024-12-13T21:30:22,025 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33659 {}] master.ServerManager(486): Registering regionserver=fd052dae32be,38989,1734125418878 2024-12-13T21:30:22,037 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05 2024-12-13T21:30:22,037 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34065 2024-12-13T21:30:22,037 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-13T21:30:22,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-13T21:30:22,050 DEBUG [RS:0;fd052dae32be:38989 {}] zookeeper.ZKUtil(111): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fd052dae32be,38989,1734125418878 2024-12-13T21:30:22,050 WARN [RS:0;fd052dae32be:38989 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-13T21:30:22,050 INFO [RS:0;fd052dae32be:38989 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-13T21:30:22,051 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/WALs/fd052dae32be,38989,1734125418878 2024-12-13T21:30:22,052 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fd052dae32be,38989,1734125418878] 2024-12-13T21:30:22,067 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-13T21:30:22,077 INFO [RS:0;fd052dae32be:38989 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-13T21:30:22,086 INFO [RS:0;fd052dae32be:38989 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-13T21:30:22,089 INFO [RS:0;fd052dae32be:38989 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-13T21:30:22,089 INFO [RS:0;fd052dae32be:38989 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:22,089 INFO [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-13T21:30:22,095 INFO [RS:0;fd052dae32be:38989 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-13T21:30:22,095 DEBUG [RS:0;fd052dae32be:38989 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fd052dae32be:0, corePoolSize=1, maxPoolSize=1 2024-12-13T21:30:22,096 DEBUG [RS:0;fd052dae32be:38989 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fd052dae32be:0, corePoolSize=1, maxPoolSize=1 2024-12-13T21:30:22,096 DEBUG [RS:0;fd052dae32be:38989 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0, corePoolSize=1, maxPoolSize=1 2024-12-13T21:30:22,096 DEBUG [RS:0;fd052dae32be:38989 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fd052dae32be:0, corePoolSize=1, maxPoolSize=1 2024-12-13T21:30:22,096 DEBUG [RS:0;fd052dae32be:38989 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fd052dae32be:0, corePoolSize=1, maxPoolSize=1 2024-12-13T21:30:22,096 DEBUG [RS:0;fd052dae32be:38989 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fd052dae32be:0, corePoolSize=2, maxPoolSize=2 2024-12-13T21:30:22,096 DEBUG [RS:0;fd052dae32be:38989 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fd052dae32be:0, corePoolSize=1, maxPoolSize=1 2024-12-13T21:30:22,096 DEBUG [RS:0;fd052dae32be:38989 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fd052dae32be:0, corePoolSize=1, maxPoolSize=1 2024-12-13T21:30:22,096 DEBUG [RS:0;fd052dae32be:38989 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fd052dae32be:0, corePoolSize=1, maxPoolSize=1 2024-12-13T21:30:22,097 DEBUG [RS:0;fd052dae32be:38989 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fd052dae32be:0, corePoolSize=1, maxPoolSize=1 2024-12-13T21:30:22,097 DEBUG [RS:0;fd052dae32be:38989 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fd052dae32be:0, corePoolSize=1, maxPoolSize=1 2024-12-13T21:30:22,097 DEBUG [RS:0;fd052dae32be:38989 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fd052dae32be:0, corePoolSize=3, maxPoolSize=3 2024-12-13T21:30:22,097 DEBUG [RS:0;fd052dae32be:38989 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0, corePoolSize=3, maxPoolSize=3 2024-12-13T21:30:22,098 INFO [RS:0;fd052dae32be:38989 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:22,098 INFO [RS:0;fd052dae32be:38989 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:22,098 INFO [RS:0;fd052dae32be:38989 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:22,098 INFO [RS:0;fd052dae32be:38989 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:22,098 INFO [RS:0;fd052dae32be:38989 {}] hbase.ChoreService(168): Chore ScheduledChore name=fd052dae32be,38989,1734125418878-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-13T21:30:22,113 INFO [RS:0;fd052dae32be:38989 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-13T21:30:22,115 INFO [RS:0;fd052dae32be:38989 {}] hbase.ChoreService(168): Chore ScheduledChore name=fd052dae32be,38989,1734125418878-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:22,130 INFO [RS:0;fd052dae32be:38989 {}] regionserver.Replication(204): fd052dae32be,38989,1734125418878 started 2024-12-13T21:30:22,130 INFO [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1767): Serving as fd052dae32be,38989,1734125418878, RpcServer on fd052dae32be/172.17.0.3:38989, sessionid=0x100214d103e0001 2024-12-13T21:30:22,131 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-13T21:30:22,131 DEBUG [RS:0;fd052dae32be:38989 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fd052dae32be,38989,1734125418878 2024-12-13T21:30:22,131 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fd052dae32be,38989,1734125418878' 2024-12-13T21:30:22,131 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-13T21:30:22,132 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-13T21:30:22,133 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-13T21:30:22,133 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-13T21:30:22,133 DEBUG [RS:0;fd052dae32be:38989 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fd052dae32be,38989,1734125418878 2024-12-13T21:30:22,133 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fd052dae32be,38989,1734125418878' 2024-12-13T21:30:22,133 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-13T21:30:22,133 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-13T21:30:22,134 DEBUG [RS:0;fd052dae32be:38989 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-13T21:30:22,134 INFO [RS:0;fd052dae32be:38989 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-13T21:30:22,134 INFO [RS:0;fd052dae32be:38989 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-13T21:30:22,247 INFO [RS:0;fd052dae32be:38989 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-13T21:30:22,250 INFO [RS:0;fd052dae32be:38989 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fd052dae32be%2C38989%2C1734125418878, suffix=, logDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/WALs/fd052dae32be,38989,1734125418878, archiveDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/oldWALs, maxLogs=32 2024-12-13T21:30:22,263 DEBUG [RS:0;fd052dae32be:38989 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/WALs/fd052dae32be,38989,1734125418878/fd052dae32be%2C38989%2C1734125418878.1734125422252, exclude list is [], retry=0 2024-12-13T21:30:22,268 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46537,DS-24f5cc3e-bc55-4040-a067-abed7c73186e,DISK] 2024-12-13T21:30:22,271 INFO [RS:0;fd052dae32be:38989 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/WALs/fd052dae32be,38989,1734125418878/fd052dae32be%2C38989%2C1734125418878.1734125422252 2024-12-13T21:30:22,271 DEBUG [RS:0;fd052dae32be:38989 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33127:33127)] 2024-12-13T21:30:22,348 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-13T21:30:22,348 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05 2024-12-13T21:30:22,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741833_1009 (size=32) 2024-12-13T21:30:22,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:30:22,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-13T21:30:22,772 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-13T21:30:22,772 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:22,773 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T21:30:22,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-13T21:30:22,776 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-13T21:30:22,777 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:22,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T21:30:22,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-12-13T21:30:22,780 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-13T21:30:22,781 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:22,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T21:30:22,783 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740 2024-12-13T21:30:22,784 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740 2024-12-13T21:30:22,787 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-13T21:30:22,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-13T21:30:22,793 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T21:30:22,794 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67030252, jitterRate=-0.001171410083770752}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T21:30:22,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-13T21:30:22,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-13T21:30:22,796 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-13T21:30:22,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-13T21:30:22,797 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-13T21:30:22,797 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-13T21:30:22,798 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-13T21:30:22,798 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-13T21:30:22,800 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-13T21:30:22,800 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-13T21:30:22,805 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-13T21:30:22,812 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-13T21:30:22,814 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-13T21:30:22,969 DEBUG [fd052dae32be:33659 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-13T21:30:22,978 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:30:22,984 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fd052dae32be,38989,1734125418878, state=OPENING 2024-12-13T21:30:23,033 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-13T21:30:23,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:23,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:23,043 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T21:30:23,043 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T21:30:23,047 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:30:23,227 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:23,229 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T21:30:23,232 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34474, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T21:30:23,242 INFO [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-13T21:30:23,242 INFO [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-13T21:30:23,243 INFO [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-13T21:30:23,246 INFO [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fd052dae32be%2C38989%2C1734125418878.meta, suffix=.meta, logDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/WALs/fd052dae32be,38989,1734125418878, archiveDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/oldWALs, maxLogs=32 2024-12-13T21:30:23,260 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/WALs/fd052dae32be,38989,1734125418878/fd052dae32be%2C38989%2C1734125418878.meta.1734125423248.meta, exclude list is [], retry=0 2024-12-13T21:30:23,264 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46537,DS-24f5cc3e-bc55-4040-a067-abed7c73186e,DISK] 2024-12-13T21:30:23,266 INFO [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/WALs/fd052dae32be,38989,1734125418878/fd052dae32be%2C38989%2C1734125418878.meta.1734125423248.meta 2024-12-13T21:30:23,267 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:33127:33127)] 2024-12-13T21:30:23,267 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-13T21:30:23,268 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-13T21:30:23,315 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-13T21:30:23,319 INFO [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-13T21:30:23,323 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-13T21:30:23,323 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:30:23,323 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-13T21:30:23,323 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-13T21:30:23,326 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-13T21:30:23,328 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-13T21:30:23,328 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:23,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T21:30:23,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-13T21:30:23,330 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-13T21:30:23,330 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:23,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T21:30:23,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-13T21:30:23,333 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-13T21:30:23,333 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:23,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T21:30:23,336 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740 2024-12-13T21:30:23,338 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740 2024-12-13T21:30:23,341 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-13T21:30:23,345 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-13T21:30:23,347 INFO [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67713949, jitterRate=0.009016469120979309}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T21:30:23,349 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-13T21:30:23,358 INFO [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734125423221 2024-12-13T21:30:23,368 DEBUG [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-13T21:30:23,369 INFO [RS_OPEN_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-13T21:30:23,370 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:30:23,371 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fd052dae32be,38989,1734125418878, state=OPEN 2024-12-13T21:30:23,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-13T21:30:23,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-13T21:30:23,466 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T21:30:23,466 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T21:30:23,473 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-13T21:30:23,474 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=fd052dae32be,38989,1734125418878 in 420 msec 2024-12-13T21:30:23,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-13T21:30:23,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 671 msec 2024-12-13T21:30:23,488 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.6210 sec 2024-12-13T21:30:23,488 INFO [master/fd052dae32be:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734125423488, completionTime=-1 2024-12-13T21:30:23,488 INFO [master/fd052dae32be:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-13T21:30:23,488 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-13T21:30:23,518 DEBUG [hconnection-0x1a289b3d-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:23,520 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34476, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:23,529 INFO [master/fd052dae32be:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-13T21:30:23,529 INFO [master/fd052dae32be:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734125483529 2024-12-13T21:30:23,529 INFO [master/fd052dae32be:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734125543529 2024-12-13T21:30:23,529 INFO [master/fd052dae32be:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 40 msec 2024-12-13T21:30:23,574 INFO [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fd052dae32be,33659,1734125418153-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:23,575 INFO [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fd052dae32be,33659,1734125418153-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:23,575 INFO [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fd052dae32be,33659,1734125418153-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:23,576 INFO [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-fd052dae32be:33659, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:23,577 INFO [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-13T21:30:23,583 DEBUG [master/fd052dae32be:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-13T21:30:23,585 INFO [master/fd052dae32be:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
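The entries above show the master publishing the hbase:meta location to ZooKeeper (the /hbase/meta-region-server znode) and the InitMetaProcedure/ASSIGN chain completing. For orientation only, here is a minimal client-side sketch, not part of the test itself, that resolves which server hosts hbase:meta through the public client API; the quorum address 127.0.0.1:57927 is copied from this log and is specific to this minicluster run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationLookup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ZooKeeper quorum/port as reported in this log; adjust for a real deployment.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "57927");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // hbase:meta is a single region covering the whole key space, so the empty
      // start row resolves its (only) location.
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW, true);
      System.out.println("hbase:meta is served by " + loc.getServerName());
    }
  }
}
```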
2024-12-13T21:30:23,586 INFO [master/fd052dae32be:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-13T21:30:23,591 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-13T21:30:23,594 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T21:30:23,595 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:23,596 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T21:30:23,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741835_1011 (size=358) 2024-12-13T21:30:24,010 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3a53a56d105c8fa6ce8789bb1b9d7a71, NAME => 'hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05 2024-12-13T21:30:24,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741836_1012 (size=42) 2024-12-13T21:30:24,422 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:30:24,422 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 3a53a56d105c8fa6ce8789bb1b9d7a71, disabling compactions & flushes 2024-12-13T21:30:24,422 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. 2024-12-13T21:30:24,423 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. 2024-12-13T21:30:24,423 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. 
after waiting 0 ms 2024-12-13T21:30:24,423 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. 2024-12-13T21:30:24,423 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. 2024-12-13T21:30:24,423 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3a53a56d105c8fa6ce8789bb1b9d7a71: 2024-12-13T21:30:24,428 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T21:30:24,435 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734125424429"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734125424429"}]},"ts":"1734125424429"} 2024-12-13T21:30:24,455 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-13T21:30:24,457 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T21:30:24,460 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125424457"}]},"ts":"1734125424457"} 2024-12-13T21:30:24,464 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-13T21:30:24,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3a53a56d105c8fa6ce8789bb1b9d7a71, ASSIGN}] 2024-12-13T21:30:24,520 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3a53a56d105c8fa6ce8789bb1b9d7a71, ASSIGN 2024-12-13T21:30:24,521 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=3a53a56d105c8fa6ce8789bb1b9d7a71, ASSIGN; state=OFFLINE, location=fd052dae32be,38989,1734125418878; forceNewPlan=false, retain=false 2024-12-13T21:30:24,673 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3a53a56d105c8fa6ce8789bb1b9d7a71, regionState=OPENING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:30:24,679 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 3a53a56d105c8fa6ce8789bb1b9d7a71, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:30:24,835 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:24,846 INFO [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. 2024-12-13T21:30:24,846 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 3a53a56d105c8fa6ce8789bb1b9d7a71, NAME => 'hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71.', STARTKEY => '', ENDKEY => ''} 2024-12-13T21:30:24,847 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 3a53a56d105c8fa6ce8789bb1b9d7a71 2024-12-13T21:30:24,847 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:30:24,847 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 3a53a56d105c8fa6ce8789bb1b9d7a71 2024-12-13T21:30:24,847 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 3a53a56d105c8fa6ce8789bb1b9d7a71 2024-12-13T21:30:24,850 INFO [StoreOpener-3a53a56d105c8fa6ce8789bb1b9d7a71-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3a53a56d105c8fa6ce8789bb1b9d7a71 2024-12-13T21:30:24,853 INFO [StoreOpener-3a53a56d105c8fa6ce8789bb1b9d7a71-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3a53a56d105c8fa6ce8789bb1b9d7a71 columnFamilyName info 2024-12-13T21:30:24,853 DEBUG [StoreOpener-3a53a56d105c8fa6ce8789bb1b9d7a71-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:24,854 INFO [StoreOpener-3a53a56d105c8fa6ce8789bb1b9d7a71-1 {}] regionserver.HStore(327): Store=3a53a56d105c8fa6ce8789bb1b9d7a71/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:30:24,856 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/namespace/3a53a56d105c8fa6ce8789bb1b9d7a71 2024-12-13T21:30:24,857 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/namespace/3a53a56d105c8fa6ce8789bb1b9d7a71 2024-12-13T21:30:24,861 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 3a53a56d105c8fa6ce8789bb1b9d7a71 2024-12-13T21:30:24,865 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/namespace/3a53a56d105c8fa6ce8789bb1b9d7a71/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T21:30:24,867 INFO [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 3a53a56d105c8fa6ce8789bb1b9d7a71; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71883785, jitterRate=0.07115186750888824}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T21:30:24,868 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 3a53a56d105c8fa6ce8789bb1b9d7a71: 2024-12-13T21:30:24,871 INFO [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71., pid=6, masterSystemTime=1734125424835 2024-12-13T21:30:24,874 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. 2024-12-13T21:30:24,875 INFO [RS_OPEN_PRIORITY_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. 
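The run above walks CreateTableProcedure for hbase:namespace through its states (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS) and then opens the region on the single region server. The namespace table is created by the master itself; the sketch below only mirrors the 'info' family settings printed in its descriptor (VERSIONS => '10', IN_MEMORY => 'true', BLOCKSIZE => 8192) on a hypothetical user table, to show how such a descriptor is expressed with the client API.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateNamespaceLikeTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Family settings copied from the hbase:namespace descriptor shown in the log.
      ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(10)
          .setInMemory(true)
          .setBlocksize(8192)
          .build();
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("namespace_like_demo"))  // hypothetical table name
          .setColumnFamily(info)
          .build();
      // createTable drives the same CreateTableProcedure state machine seen above.
      admin.createTable(desc);
    }
  }
}
```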
2024-12-13T21:30:24,875 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3a53a56d105c8fa6ce8789bb1b9d7a71, regionState=OPEN, openSeqNum=2, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:30:24,882 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-13T21:30:24,883 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 3a53a56d105c8fa6ce8789bb1b9d7a71, server=fd052dae32be,38989,1734125418878 in 200 msec 2024-12-13T21:30:24,885 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-13T21:30:24,885 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=3a53a56d105c8fa6ce8789bb1b9d7a71, ASSIGN in 365 msec 2024-12-13T21:30:24,887 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T21:30:24,887 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125424887"}]},"ts":"1734125424887"} 2024-12-13T21:30:24,890 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-13T21:30:24,935 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-13T21:30:24,936 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T21:30:24,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-13T21:30:24,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:24,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:30:24,943 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.3490 sec 2024-12-13T21:30:24,966 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-13T21:30:24,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-13T21:30:25,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 39 msec 2024-12-13T21:30:25,010 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-13T21:30:25,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-13T21:30:25,037 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 25 msec 2024-12-13T21:30:25,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-13T21:30:25,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-13T21:30:25,075 INFO [master/fd052dae32be:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 6.108sec 2024-12-13T21:30:25,078 INFO [master/fd052dae32be:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-13T21:30:25,081 INFO [master/fd052dae32be:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-13T21:30:25,083 INFO [master/fd052dae32be:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-13T21:30:25,085 INFO [master/fd052dae32be:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-13T21:30:25,085 INFO [master/fd052dae32be:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-13T21:30:25,087 INFO [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fd052dae32be,33659,1734125418153-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-13T21:30:25,087 INFO [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fd052dae32be,33659,1734125418153-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-13T21:30:25,093 DEBUG [master/fd052dae32be:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-13T21:30:25,094 INFO [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-13T21:30:25,094 INFO [master/fd052dae32be:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fd052dae32be,33659,1734125418153-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
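Master startup finishes here by running CreateNamespaceProcedure for the built-in 'default' and 'hbase' namespaces and enabling its maintenance chores. As a hedged aside, the same namespace operations are available to clients through Admin; the namespace name below is purely illustrative.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Create an additional namespace (illustrative name), then list what exists.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());  // expect default, hbase, demo_ns
      }
    }
  }
}
```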
2024-12-13T21:30:25,143 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a3c3fb3 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@560d619d 2024-12-13T21:30:25,144 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-13T21:30:25,159 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e64d5c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:25,162 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-13T21:30:25,162 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-13T21:30:25,171 DEBUG [hconnection-0x5b22ea21-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:25,178 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34484, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:25,187 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=fd052dae32be,33659,1734125418153 2024-12-13T21:30:25,200 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=154, ProcessCount=11, AvailableMemoryMB=3413 2024-12-13T21:30:25,209 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-13T21:30:25,212 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47396, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-13T21:30:25,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
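The TableDescriptorChecker warning just above fires because the test's table descriptor sets MEMSTORE_FLUSHSIZE to 131072 bytes (128 KB), far below the default, so that flushes happen frequently during the run. Below is a minimal sketch of how a descriptor carries such a value, assuming a hypothetical table name; production tables would normally leave the flush size at its default.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushSizeDescriptor {
  public static void main(String[] args) {
    // 128 KB per-region memstore flush threshold, matching the value the checker warns about.
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("flush_demo"))  // hypothetical table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
        .setMemStoreFlushSize(128 * 1024)
        .build();
    // toString lists the configured table attributes, including the flush size.
    System.out.println(desc);
  }
}
```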
2024-12-13T21:30:25,223 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T21:30:25,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-13T21:30:25,228 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T21:30:25,228 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:25,230 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-13T21:30:25,230 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T21:30:25,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T21:30:25,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741837_1013 (size=963) 2024-12-13T21:30:25,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T21:30:25,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T21:30:25,673 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05 2024-12-13T21:30:25,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741838_1014 (size=53) 2024-12-13T21:30:25,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T21:30:26,085 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:30:26,085 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing cc66b42faaed28a8693a712966f73789, disabling compactions & flushes 2024-12-13T21:30:26,085 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:26,085 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:26,086 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. after waiting 0 ms 2024-12-13T21:30:26,086 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:26,086 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
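The CreateTableProcedure above builds 'TestAcidGuarantees' with three families (A, B, C, one version each, 64 KB blocks) and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', which is why every store later comes up with a CompactingMemStore. The following is a hedged reconstruction of an equivalent descriptor through the client API; it mirrors only what the log prints and is not the test's own helper code.

```java
import java.util.Arrays;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidGuaranteesLikeTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder builder = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Table-level metadata exactly as shown in the log; switches the stores
          // to the ADAPTIVE in-memory compaction (CompactingMemStore) policy.
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : Arrays.asList("A", "B", "C")) {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)     // VERSIONS => '1'
            .setBlocksize(65536)   // BLOCKSIZE => '65536 B (64KB)'
            .build();
        builder.setColumnFamily(cf);
      }
      admin.createTable(builder.build());
    }
  }
}
```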
2024-12-13T21:30:26,086 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:26,088 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T21:30:26,089 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734125426088"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734125426088"}]},"ts":"1734125426088"} 2024-12-13T21:30:26,092 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-13T21:30:26,094 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T21:30:26,095 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125426094"}]},"ts":"1734125426094"} 2024-12-13T21:30:26,098 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-13T21:30:26,149 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc66b42faaed28a8693a712966f73789, ASSIGN}] 2024-12-13T21:30:26,151 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc66b42faaed28a8693a712966f73789, ASSIGN 2024-12-13T21:30:26,153 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc66b42faaed28a8693a712966f73789, ASSIGN; state=OFFLINE, location=fd052dae32be,38989,1734125418878; forceNewPlan=false, retain=false 2024-12-13T21:30:26,304 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=cc66b42faaed28a8693a712966f73789, regionState=OPENING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:30:26,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:30:26,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T21:30:26,464 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:26,472 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:26,472 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} 2024-12-13T21:30:26,472 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:26,472 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:30:26,473 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:26,473 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:26,475 INFO [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:26,479 INFO [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:30:26,479 INFO [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc66b42faaed28a8693a712966f73789 columnFamilyName A 2024-12-13T21:30:26,479 DEBUG [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:26,480 INFO [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] regionserver.HStore(327): Store=cc66b42faaed28a8693a712966f73789/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:30:26,481 INFO [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:26,482 INFO [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:30:26,483 INFO [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc66b42faaed28a8693a712966f73789 columnFamilyName B 2024-12-13T21:30:26,483 DEBUG [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:26,483 INFO [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] regionserver.HStore(327): Store=cc66b42faaed28a8693a712966f73789/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:30:26,484 INFO [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:26,485 INFO [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:30:26,485 INFO [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc66b42faaed28a8693a712966f73789 columnFamilyName C 2024-12-13T21:30:26,486 DEBUG [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:26,486 INFO [StoreOpener-cc66b42faaed28a8693a712966f73789-1 {}] regionserver.HStore(327): Store=cc66b42faaed28a8693a712966f73789/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:30:26,487 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:26,488 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:26,489 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:26,491 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-13T21:30:26,493 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:26,496 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T21:30:26,497 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened cc66b42faaed28a8693a712966f73789; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66659030, jitterRate=-0.006703048944473267}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T21:30:26,498 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:26,499 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., pid=11, masterSystemTime=1734125426464 2024-12-13T21:30:26,501 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:26,502 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
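With the region open, each of the three stores is backed by a CompactingMemStore (2 MB in-memory flush threshold, ADAPTIVE compactor) and the region uses FlushNonSloppyStoresFirstPolicy. The acid-guarantees workload relies on HBase's row-level atomicity: a single Put carrying cells for A, B and C is applied to the row as one unit. The sketch below illustrates that contract only; the row and column names follow the test_row_0/A:col10 pattern visible later in this log, and the real test drives many such writers concurrently.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicMultiFamilyPut {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      byte[] row = Bytes.toBytes("test_row_0");
      byte[] value = Bytes.toBytes("v1");
      // One Put spanning all three families: HBase applies it atomically per row,
      // so readers never see family A updated while B/C still hold the old value.
      Put put = new Put(row);
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
      }
      table.put(put);

      Result result = table.get(new Get(row));
      System.out.println("A:col10 = "
          + Bytes.toString(result.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"))));
    }
  }
}
```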
2024-12-13T21:30:26,502 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=cc66b42faaed28a8693a712966f73789, regionState=OPEN, openSeqNum=2, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:30:26,508 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-13T21:30:26,509 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 in 197 msec 2024-12-13T21:30:26,511 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-13T21:30:26,511 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc66b42faaed28a8693a712966f73789, ASSIGN in 359 msec 2024-12-13T21:30:26,512 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T21:30:26,512 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125426512"}]},"ts":"1734125426512"} 2024-12-13T21:30:26,515 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-13T21:30:26,561 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T21:30:26,566 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.3380 sec 2024-12-13T21:30:27,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T21:30:27,377 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-13T21:30:27,385 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e59596a to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@30640414 2024-12-13T21:30:27,434 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36ea98cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:27,439 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:27,441 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34486, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:27,445 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-13T21:30:27,447 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47398, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-13T21:30:27,454 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2cac4303 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@536a4a58 2024-12-13T21:30:27,466 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b3a6cb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:27,468 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x39b10898 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3888ad7c 2024-12-13T21:30:27,475 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29b132d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:27,477 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d7115de to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2dd0bbda 2024-12-13T21:30:27,490 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dd77b4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:27,492 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x30d4d4c6 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18f2a76d 2024-12-13T21:30:27,501 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a33c837, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:27,503 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x054c943d to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@435176b2 2024-12-13T21:30:27,515 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37577c9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:27,518 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f0c7188 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4e957ecd 2024-12-13T21:30:27,526 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37950159, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:27,528 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x475ca0f4 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22daddc4 2024-12-13T21:30:27,540 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d5a9f0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:27,541 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x50c9c1d1 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39028e20 2024-12-13T21:30:27,550 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d4c9c1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:27,551 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f1331a9 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@624dc5e5 2024-12-13T21:30:27,558 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bb819cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:27,563 DEBUG [hconnection-0x34e8847a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:27,563 DEBUG [hconnection-0x4a22ee9a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:27,564 DEBUG [hconnection-0x106fdee5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:27,565 DEBUG [hconnection-0x17b3816e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:27,566 DEBUG [hconnection-0x624e92fc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:27,566 DEBUG [hconnection-0x23bc3a57-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:27,567 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34490, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:27,567 DEBUG [hconnection-0x134cb240-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:27,568 DEBUG [hconnection-0x2ce6d49-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
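The run of ReadOnlyZKClient "Connect 0x…" and AbstractRpcClient entries above corresponds to the test opening a separate client Connection per worker, so each writer/reader gets its own ZooKeeper session and RPC channel. A minimal sketch of that pattern follows; the worker count is illustrative, the table name comes from the log, and the loop body is a placeholder for the actual workload.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class PerWorkerConnections {
  public static void main(String[] args) throws Exception {
    final Configuration conf = HBaseConfiguration.create();
    ExecutorService pool = Executors.newFixedThreadPool(5);
    for (int i = 0; i < 5; i++) {
      pool.submit(() -> {
        // Each worker owns its Connection (its own ZK client and RPC channel),
        // mirroring the repeated ReadOnlyZKClient/AbstractRpcClient lines in the log.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // ... issue Puts/Gets/Scans against families A, B, C here ...
        }
        return null;
      });
    }
    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.MINUTES);
  }
}
```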
2024-12-13T21:30:27,568 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34500, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:27,568 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34508, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:27,568 DEBUG [hconnection-0x5e621c7f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:27,572 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34524, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:27,572 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34526, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:27,573 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34552, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:27,575 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:30:27,576 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34564, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:27,576 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34558, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:27,579 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:27,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-13T21:30:27,586 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:30:27,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-13T21:30:27,587 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:30:27,589 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:30:27,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:30:27,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:27,650 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:27,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:27,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:27,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:27,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:27,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-13T21:30:27,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/fee04714259b47fcbc4b85477efaa914 is 50, key is test_row_0/A:col10/1734125427600/Put/seqid=0 2024-12-13T21:30:27,753 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:27,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-13T21:30:27,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:27,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:27,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:27,762 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:27,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741839_1015 (size=12001) 2024-12-13T21:30:27,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:27,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:27,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:27,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125487774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:27,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:27,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125487770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:27,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:27,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125487790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:27,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:27,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125487791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:27,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:27,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125487791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:27,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-13T21:30:27,931 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:27,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-13T21:30:27,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:27,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:27,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:27,933 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
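The `flush TestAcidGuarantees` request logged at 21:30:27,575 is an administrative flush: the master stores a FlushTableProcedure (pid=12) and dispatches a FlushRegionProcedure subprocedure (pid=13) to the region server, which repeatedly reports "Unable to complete flush ... as already flushing" because the MemStoreFlusher is already writing the region out, so the subprocedure is retried. A hedged sketch of the client call behind such a request, with connection setup as in the earlier sketch (the class name is illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml (or explicit settings as in the earlier sketch) points at the test cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Sends the flush request handled by HMaster above; per the log, the master turns it into a
      // FlushTableProcedure whose per-region subprocedures run FlushRegionCallable on the region server.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```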
2024-12-13T21:30:27,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:27,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:27,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125487930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:27,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:27,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125487930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:27,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:27,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:27,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:27,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125487930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:27,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125487932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:27,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125487930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,072 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-13T21:30:28,074 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for 
table 'hbase:meta' 2024-12-13T21:30:28,075 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-13T21:30:28,088 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-13T21:30:28,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:28,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:28,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:28,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125488139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125488141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125488142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125488143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125488144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,164 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/fee04714259b47fcbc4b85477efaa914 2024-12-13T21:30:28,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-13T21:30:28,249 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-13T21:30:28,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:28,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:28,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
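The repeated RegionTooBusyException warnings show the region rejecting Mutate calls once its memstore passes the 512 KB blocking limit while the flush is still writing out the A/B/C stores; the entries at 21:30:28,164 onward then show the flushed files landing under .tmp/ and being committed. A writer hitting this server-side pushback can back off and retry. A minimal sketch, assuming the application handles retries explicitly (with default client settings HBase's own retry logic already retries these); the row key, family, and qualifier mirror the test's schema, and the class and method names are illustrative:

```java
import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BusyRegionWriter {

  /** Writes one cell, backing off when the region reports it is too busy to accept the mutation. */
  static void putWithBackoff(Connection connection) throws IOException, InterruptedException {
    Put put = new Put(Bytes.toBytes("test_row_0"));
    put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

    try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          return;
        } catch (IOException e) {
          // Covers RegionTooBusyException ("Over memstore limit=512.0 K" above), which may reach
          // the caller directly or wrapped by the client's retry machinery depending on settings.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000); // exponential backoff, capped at 5 s
        }
      }
      throw new IOException("region still too busy after 10 attempts");
    }
  }
}
```

Backing off gives the MemStoreFlusher time to finish and drop the memstore below the blocking threshold before the next attempt, which is exactly what the later successful Mutate traffic in this log reflects.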
2024-12-13T21:30:28,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/f12579fa62154ea88cea17b2dce6d562 is 50, key is test_row_0/B:col10/1734125427600/Put/seqid=0 2024-12-13T21:30:28,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741840_1016 (size=12001) 2024-12-13T21:30:28,296 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/f12579fa62154ea88cea17b2dce6d562 2024-12-13T21:30:28,353 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/e3d41718a04943238e85358014990b29 is 50, key is test_row_0/C:col10/1734125427600/Put/seqid=0 2024-12-13T21:30:28,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741841_1017 (size=12001) 2024-12-13T21:30:28,375 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/e3d41718a04943238e85358014990b29 2024-12-13T21:30:28,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/fee04714259b47fcbc4b85477efaa914 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fee04714259b47fcbc4b85477efaa914 2024-12-13T21:30:28,405 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,406 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-13T21:30:28,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:28,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:28,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:28,406 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
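pid=13 above is a master-driven flush procedure being re-dispatched to the region server, which keeps refusing it because the region is already flushing; the master records the failure and tries again. A flush like this is typically requested through the Admin API (the "Checking to see if procedure is done" polling above is consistent with that). A minimal sketch, assuming only the table name from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlushSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // The master turns this call into a flush procedure and dispatches
                // FlushRegionCallable to the server hosting each region of the table,
                // which is the pid=12/pid=13 activity visible in the surrounding entries.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }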
2024-12-13T21:30:28,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fee04714259b47fcbc4b85477efaa914, entries=150, sequenceid=13, filesize=11.7 K 2024-12-13T21:30:28,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/f12579fa62154ea88cea17b2dce6d562 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/f12579fa62154ea88cea17b2dce6d562 2024-12-13T21:30:28,430 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/f12579fa62154ea88cea17b2dce6d562, entries=150, sequenceid=13, filesize=11.7 K 2024-12-13T21:30:28,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/e3d41718a04943238e85358014990b29 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e3d41718a04943238e85358014990b29 2024-12-13T21:30:28,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e3d41718a04943238e85358014990b29, entries=150, sequenceid=13, filesize=11.7 K 2024-12-13T21:30:28,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for cc66b42faaed28a8693a712966f73789 in 812ms, sequenceid=13, compaction requested=false 2024-12-13T21:30:28,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:28,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:28,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-13T21:30:28,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:28,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:28,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:28,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:28,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:28,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-12-13T21:30:28,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125488472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/38b39442bfe2498585495c7e87dac905 is 50, key is test_row_0/A:col10/1734125427768/Put/seqid=0 2024-12-13T21:30:28,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125488493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125488493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125488494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125488497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741842_1018 (size=14341) 2024-12-13T21:30:28,560 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-13T21:30:28,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:28,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:28,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:28,564 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
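The repeated "Over memstore limit=512.0 K" rejections mean the region has reached its blocking memstore size, which is the per-region flush size multiplied by the block multiplier; writes are refused until the flush in progress frees space. The test presumably runs with a deliberately small flush size so this path triggers after a few kilobytes of data. The values below are an assumed illustration of one combination that yields a 512 K limit, not settings taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore at 128 KB instead of the 128 MB default.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // Block writes once the memstore exceeds flush.size * multiplier (128 KB * 4 = 512 KB).
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking memstore limit = " + blockingLimit / 1024 + " K");
        }
    }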
2024-12-13T21:30:28,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125488600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125488614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125488615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125488616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125488618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-13T21:30:28,719 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-13T21:30:28,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:28,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:28,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:28,721 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
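Each flush in this log writes a separate store file per column family (the .tmp/A, .tmp/B and .tmp/C paths). A minimal sketch of creating a table with that shape; the table and family names come from the log, everything else is an assumed default setup:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder builder =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
                // One store per family: each family gets its own memstore and is flushed
                // to its own file, matching the per-family flush entries above.
                for (String family : new String[] {"A", "B", "C"}) {
                    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes(family)));
                }
                admin.createTable(builder.build());
            }
        }
    }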
2024-12-13T21:30:28,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125488806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125488818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125488824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125488826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:28,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125488826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,834 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-13T21:30:28,875 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:28,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-13T21:30:28,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:28,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:28,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:28,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
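The region server answers every re-dispatched flush callable with "NOT flushing ... as already flushing", and the handler reports that back as the IOException seen above, so the master simply retries later. The guard pattern behind that behaviour looks roughly like the sketch below; this is not the HBase source, and all names in it are hypothetical:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Hypothetical stand-in illustrating the "refuse a flush while one is running" guard
    // reflected by the "NOT flushing ... as already flushing" / "Unable to complete flush"
    // pairing in the log. None of these names come from HBase itself.
    class FlushGuardSketch {
        private final AtomicBoolean flushing = new AtomicBoolean(false);

        void flushOnce(Runnable doFlush) throws IOException {
            if (!flushing.compareAndSet(false, true)) {
                // A flush is already in progress: fail fast and let the caller retry later.
                throw new IOException("Unable to complete flush: already flushing");
            }
            try {
                doFlush.run();
            } finally {
                flushing.set(false);
            }
        }
    }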
2024-12-13T21:30:28,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:28,904 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-13T21:30:28,904 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-13T21:30:28,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-13T21:30:28,906 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-13T21:30:28,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-13T21:30:28,907 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-13T21:30:28,908 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-13T21:30:28,908 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-13T21:30:28,910 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-13T21:30:28,910 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-13T21:30:28,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/38b39442bfe2498585495c7e87dac905 2024-12-13T21:30:28,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/bddfd5f0d68447f28966759f7685c2a3 is 50, key is test_row_0/B:col10/1734125427768/Put/seqid=0 2024-12-13T21:30:28,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741843_1019 (size=12001) 2024-12-13T21:30:29,000 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=40 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/bddfd5f0d68447f28966759f7685c2a3 2024-12-13T21:30:29,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/4bad27e334504d8b82a34e8b5e31ed7c is 50, key is test_row_0/C:col10/1734125427768/Put/seqid=0 2024-12-13T21:30:29,029 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-13T21:30:29,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:29,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:29,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:29,031 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:29,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:29,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:30:29,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741844_1020 (size=12001) 2024-12-13T21:30:29,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/4bad27e334504d8b82a34e8b5e31ed7c 2024-12-13T21:30:29,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/38b39442bfe2498585495c7e87dac905 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/38b39442bfe2498585495c7e87dac905 2024-12-13T21:30:29,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/38b39442bfe2498585495c7e87dac905, entries=200, sequenceid=40, filesize=14.0 K 2024-12-13T21:30:29,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/bddfd5f0d68447f28966759f7685c2a3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/bddfd5f0d68447f28966759f7685c2a3 2024-12-13T21:30:29,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/bddfd5f0d68447f28966759f7685c2a3, entries=150, sequenceid=40, filesize=11.7 K 2024-12-13T21:30:29,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/4bad27e334504d8b82a34e8b5e31ed7c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4bad27e334504d8b82a34e8b5e31ed7c 2024-12-13T21:30:29,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4bad27e334504d8b82a34e8b5e31ed7c, entries=150, sequenceid=40, filesize=11.7 K 2024-12-13T21:30:29,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125489113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for cc66b42faaed28a8693a712966f73789 in 671ms, sequenceid=40, compaction requested=false 2024-12-13T21:30:29,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:29,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:29,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-13T21:30:29,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:29,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:29,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:29,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:29,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:29,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:29,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/05c50dca0efb4334acc0bc60d46326cd is 50, key is test_row_0/A:col10/1734125429136/Put/seqid=0 2024-12-13T21:30:29,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741845_1021 (size=12001) 2024-12-13T21:30:29,179 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/05c50dca0efb4334acc0bc60d46326cd 2024-12-13T21:30:29,185 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-13T21:30:29,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:29,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:29,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:29,186 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:29,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:29,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:30:29,206 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/fdd65570bd4649aebbe6b2aff9442bd1 is 50, key is test_row_0/B:col10/1734125429136/Put/seqid=0 2024-12-13T21:30:29,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741846_1022 (size=12001) 2024-12-13T21:30:29,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/fdd65570bd4649aebbe6b2aff9442bd1 2024-12-13T21:30:29,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/73d1837ece124332b5016056e297c9d0 is 50, key is test_row_0/C:col10/1734125429136/Put/seqid=0 2024-12-13T21:30:29,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741847_1023 (size=12001) 2024-12-13T21:30:29,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125489293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125489293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125489293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125489297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,340 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,341 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-13T21:30:29,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:29,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:29,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:29,342 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:29,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:30:29,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:29,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125489407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125489408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125489408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125489409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,498 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,498 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-13T21:30:29,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:29,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:29,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:29,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:30:29,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:29,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:29,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125489614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125489615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125489617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125489618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:30:29,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125489632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878
2024-12-13T21:30:29,653 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878
2024-12-13T21:30:29,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-12-13T21:30:29,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.
2024-12-13T21:30:29,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing
2024-12-13T21:30:29,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.
2024-12-13T21:30:29,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13
java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T21:30:29,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:29,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T21:30:29,659 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/73d1837ece124332b5016056e297c9d0
2024-12-13T21:30:29,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/05c50dca0efb4334acc0bc60d46326cd as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/05c50dca0efb4334acc0bc60d46326cd
2024-12-13T21:30:29,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/05c50dca0efb4334acc0bc60d46326cd, entries=150, sequenceid=51, filesize=11.7 K
2024-12-13T21:30:29,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/fdd65570bd4649aebbe6b2aff9442bd1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/fdd65570bd4649aebbe6b2aff9442bd1
2024-12-13T21:30:29,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-12-13T21:30:29,707 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/fdd65570bd4649aebbe6b2aff9442bd1, entries=150, sequenceid=51, filesize=11.7 K
2024-12-13T21:30:29,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/73d1837ece124332b5016056e297c9d0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/73d1837ece124332b5016056e297c9d0
2024-12-13T21:30:29,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/73d1837ece124332b5016056e297c9d0, entries=150, sequenceid=51, filesize=11.7 K
2024-12-13T21:30:29,723 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for cc66b42faaed28a8693a712966f73789 in 584ms, sequenceid=51, compaction requested=true
2024-12-13T21:30:29,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789:
2024-12-13T21:30:29,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:A, priority=-2147483648, current under compaction store size is 1
2024-12-13T21:30:29,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T21:30:29,727 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T21:30:29,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:B, priority=-2147483648, current under compaction store size is 2
2024-12-13T21:30:29,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T21:30:29,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:C, priority=-2147483648, current under compaction store size is 3
2024-12-13T21:30:29,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-13T21:30:29,727 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T21:30:29,731 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-13T21:30:29,732 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/B is initiating minor compaction (all files)
2024-12-13T21:30:29,732 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/B in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.
2024-12-13T21:30:29,733 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/f12579fa62154ea88cea17b2dce6d562, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/bddfd5f0d68447f28966759f7685c2a3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/fdd65570bd4649aebbe6b2aff9442bd1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=35.2 K 2024-12-13T21:30:29,734 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting f12579fa62154ea88cea17b2dce6d562, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734125427600 2024-12-13T21:30:29,735 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting bddfd5f0d68447f28966759f7685c2a3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734125427768 2024-12-13T21:30:29,736 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:29,736 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting fdd65570bd4649aebbe6b2aff9442bd1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125428492 2024-12-13T21:30:29,736 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/A is initiating minor compaction (all files) 2024-12-13T21:30:29,741 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/A in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:29,741 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fee04714259b47fcbc4b85477efaa914, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/38b39442bfe2498585495c7e87dac905, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/05c50dca0efb4334acc0bc60d46326cd] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=37.4 K 2024-12-13T21:30:29,742 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting fee04714259b47fcbc4b85477efaa914, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734125427600 2024-12-13T21:30:29,743 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38b39442bfe2498585495c7e87dac905, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734125427746 2024-12-13T21:30:29,743 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05c50dca0efb4334acc0bc60d46326cd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125428492 2024-12-13T21:30:29,772 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#B#compaction#9 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:29,773 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/0cdeb1aec65d49c8bcbc27082055b6c5 is 50, key is test_row_0/B:col10/1734125429136/Put/seqid=0 2024-12-13T21:30:29,774 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#A#compaction#10 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:29,774 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/69d5c355c914456abbccfae606e2011b is 50, key is test_row_0/A:col10/1734125429136/Put/seqid=0 2024-12-13T21:30:29,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741849_1025 (size=12104) 2024-12-13T21:30:29,809 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,810 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-13T21:30:29,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:29,810 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-13T21:30:29,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:29,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:29,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:29,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:29,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:29,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:29,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741848_1024 (size=12104) 2024-12-13T21:30:29,821 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/0cdeb1aec65d49c8bcbc27082055b6c5 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/0cdeb1aec65d49c8bcbc27082055b6c5 2024-12-13T21:30:29,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 
{event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/5b78f9661c45491a954f8a0cb0b231b1 is 50, key is test_row_0/A:col10/1734125429289/Put/seqid=0 2024-12-13T21:30:29,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741850_1026 (size=12001) 2024-12-13T21:30:29,849 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/5b78f9661c45491a954f8a0cb0b231b1 2024-12-13T21:30:29,865 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/B of cc66b42faaed28a8693a712966f73789 into 0cdeb1aec65d49c8bcbc27082055b6c5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:29,865 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:29,866 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/B, priority=13, startTime=1734125429727; duration=0sec 2024-12-13T21:30:29,867 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:29,867 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:B 2024-12-13T21:30:29,867 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:29,870 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:29,870 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/C is initiating minor compaction (all files) 2024-12-13T21:30:29,870 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/C in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:29,871 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e3d41718a04943238e85358014990b29, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4bad27e334504d8b82a34e8b5e31ed7c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/73d1837ece124332b5016056e297c9d0] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=35.2 K 2024-12-13T21:30:29,872 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting e3d41718a04943238e85358014990b29, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734125427600 2024-12-13T21:30:29,873 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bad27e334504d8b82a34e8b5e31ed7c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734125427768 2024-12-13T21:30:29,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/d830452fd225448aae09307ecd6637b0 is 50, key is test_row_0/B:col10/1734125429289/Put/seqid=0 2024-12-13T21:30:29,874 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 73d1837ece124332b5016056e297c9d0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125428492 2024-12-13T21:30:29,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741851_1027 (size=12001) 2024-12-13T21:30:29,887 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/d830452fd225448aae09307ecd6637b0 2024-12-13T21:30:29,901 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#C#compaction#13 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:29,902 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/aba6b079b2534447a90ab67c2c7636f9 is 50, key is test_row_0/C:col10/1734125429136/Put/seqid=0 2024-12-13T21:30:29,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/a4be1c4195d4410182fd51caefc018ca is 50, key is test_row_0/C:col10/1734125429289/Put/seqid=0 2024-12-13T21:30:29,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:29,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741852_1028 (size=12104) 2024-12-13T21:30:29,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:29,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741853_1029 (size=12001) 2024-12-13T21:30:29,944 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/aba6b079b2534447a90ab67c2c7636f9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/aba6b079b2534447a90ab67c2c7636f9 2024-12-13T21:30:29,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125489939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125489943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125489946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:29,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125489950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:29,960 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/C of cc66b42faaed28a8693a712966f73789 into aba6b079b2534447a90ab67c2c7636f9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:29,960 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:29,960 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/C, priority=13, startTime=1734125429727; duration=0sec 2024-12-13T21:30:29,961 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:29,961 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:C 2024-12-13T21:30:30,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125490052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125490054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125490054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125490055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,233 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/69d5c355c914456abbccfae606e2011b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/69d5c355c914456abbccfae606e2011b 2024-12-13T21:30:30,251 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/A of cc66b42faaed28a8693a712966f73789 into 69d5c355c914456abbccfae606e2011b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:30,251 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:30,252 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/A, priority=13, startTime=1734125429725; duration=0sec 2024-12-13T21:30:30,252 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:30,252 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:A 2024-12-13T21:30:30,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125490258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125490259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125490260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125490261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,342 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/a4be1c4195d4410182fd51caefc018ca 2024-12-13T21:30:30,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/5b78f9661c45491a954f8a0cb0b231b1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/5b78f9661c45491a954f8a0cb0b231b1 2024-12-13T21:30:30,392 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/5b78f9661c45491a954f8a0cb0b231b1, entries=150, sequenceid=76, filesize=11.7 K 2024-12-13T21:30:30,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/d830452fd225448aae09307ecd6637b0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/d830452fd225448aae09307ecd6637b0 2024-12-13T21:30:30,410 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/d830452fd225448aae09307ecd6637b0, entries=150, sequenceid=76, filesize=11.7 K 2024-12-13T21:30:30,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/a4be1c4195d4410182fd51caefc018ca as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/a4be1c4195d4410182fd51caefc018ca 2024-12-13T21:30:30,429 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/a4be1c4195d4410182fd51caefc018ca, entries=150, sequenceid=76, filesize=11.7 K 2024-12-13T21:30:30,430 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for cc66b42faaed28a8693a712966f73789 in 620ms, sequenceid=76, compaction requested=false 2024-12-13T21:30:30,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:30,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:30,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-13T21:30:30,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-13T21:30:30,438 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-13T21:30:30,438 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8450 sec 2024-12-13T21:30:30,442 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.8630 sec 2024-12-13T21:30:30,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-13T21:30:30,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:30,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:30,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:30,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:30,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:30,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:30,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:30,583 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/ca54c30942a14781ab1bb8f397a58a77 is 50, key is test_row_0/A:col10/1734125430567/Put/seqid=0 2024-12-13T21:30:30,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741854_1030 (size=12001) 2024-12-13T21:30:30,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/ca54c30942a14781ab1bb8f397a58a77 2024-12-13T21:30:30,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125490618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125490619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125490621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/92b2a61b524342c385b6f512ebc1de97 is 50, key is test_row_0/B:col10/1734125430567/Put/seqid=0 2024-12-13T21:30:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125490624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125490642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741855_1031 (size=12001) 2024-12-13T21:30:30,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/92b2a61b524342c385b6f512ebc1de97 2024-12-13T21:30:30,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/8c532a692517461590cadee3e189bc88 is 50, key is test_row_0/C:col10/1734125430567/Put/seqid=0 2024-12-13T21:30:30,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741856_1032 (size=12001) 2024-12-13T21:30:30,715 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/8c532a692517461590cadee3e189bc88 2024-12-13T21:30:30,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125490725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125490728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125490730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/ca54c30942a14781ab1bb8f397a58a77 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/ca54c30942a14781ab1bb8f397a58a77 2024-12-13T21:30:30,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125490734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/ca54c30942a14781ab1bb8f397a58a77, entries=150, sequenceid=92, filesize=11.7 K 2024-12-13T21:30:30,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/92b2a61b524342c385b6f512ebc1de97 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/92b2a61b524342c385b6f512ebc1de97 2024-12-13T21:30:30,766 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/92b2a61b524342c385b6f512ebc1de97, entries=150, sequenceid=92, filesize=11.7 K 2024-12-13T21:30:30,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/8c532a692517461590cadee3e189bc88 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8c532a692517461590cadee3e189bc88 2024-12-13T21:30:30,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8c532a692517461590cadee3e189bc88, entries=150, sequenceid=92, filesize=11.7 K 2024-12-13T21:30:30,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for cc66b42faaed28a8693a712966f73789 in 218ms, sequenceid=92, compaction requested=true 2024-12-13T21:30:30,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:30,790 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:30,790 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:30:30,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:30,791 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:30,794 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:30,794 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:30,795 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/B is initiating minor compaction (all files) 2024-12-13T21:30:30,795 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/A is initiating minor compaction (all files) 2024-12-13T21:30:30,795 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/B in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:30,795 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/A in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
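The ExploringCompactionPolicy entries above pick three store files of roughly 12 K each (36106 bytes total) because every file in the candidate set falls within the configured size ratio. The Java sketch below illustrates that "files in ratio" test under stated assumptions: the 1.2 ratio is only the commonly documented default for hbase.hstore.compaction.ratio, and the class and method names are invented for illustration rather than copied from HBase.

import java.util.List;

// Illustrative sketch of the size-ratio rule behind the compaction selection
// logged above: every candidate file must be no larger than RATIO times the
// combined size of the other candidates, otherwise the permutation is rejected.
public class CompactionRatioSketch {

    // Assumed default ratio (hbase.hstore.compaction.ratio); not read from this run's config.
    private static final double RATIO = 1.2;

    static boolean filesInRatio(List<Long> fileSizes) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > RATIO * (total - size)) {
                return false; // one oversized file disqualifies the whole selection
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Three files roughly matching the 36106-byte selection above.
        System.out.println(filesInRatio(List.of(12_104L, 12_001L, 12_001L)));  // true
        // A single much larger file would push the set out of ratio.
        System.out.println(filesInRatio(List.of(120_000L, 12_001L, 12_001L))); // false
    }
}

Once selected, the files are rewritten into a single new HFile in the store's .tmp directory and then committed, which is the "Committing ... .tmp/A/..." followed by "Completed compaction" sequence visible elsewhere in this log.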
2024-12-13T21:30:30,795 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/0cdeb1aec65d49c8bcbc27082055b6c5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/d830452fd225448aae09307ecd6637b0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/92b2a61b524342c385b6f512ebc1de97] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=35.3 K 2024-12-13T21:30:30,796 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/69d5c355c914456abbccfae606e2011b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/5b78f9661c45491a954f8a0cb0b231b1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/ca54c30942a14781ab1bb8f397a58a77] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=35.3 K 2024-12-13T21:30:30,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:30:30,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:30,796 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 0cdeb1aec65d49c8bcbc27082055b6c5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125428492 2024-12-13T21:30:30,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:30:30,797 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69d5c355c914456abbccfae606e2011b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125428492 2024-12-13T21:30:30,798 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting d830452fd225448aae09307ecd6637b0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734125429289 2024-12-13T21:30:30,798 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b78f9661c45491a954f8a0cb0b231b1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734125429289 2024-12-13T21:30:30,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:30,799 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 92b2a61b524342c385b6f512ebc1de97, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734125429947 2024-12-13T21:30:30,799 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca54c30942a14781ab1bb8f397a58a77, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734125429947 2024-12-13T21:30:30,823 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#B#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:30,824 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/a9beb0cb891e4ace9861f20b25009ec0 is 50, key is test_row_0/B:col10/1734125430567/Put/seqid=0 2024-12-13T21:30:30,834 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#A#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:30,835 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/79b0dd5886314a87ae6407269eb59f98 is 50, key is test_row_0/A:col10/1734125430567/Put/seqid=0 2024-12-13T21:30:30,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741857_1033 (size=12207) 2024-12-13T21:30:30,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741858_1034 (size=12207) 2024-12-13T21:30:30,864 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/a9beb0cb891e4ace9861f20b25009ec0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/a9beb0cb891e4ace9861f20b25009ec0 2024-12-13T21:30:30,871 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/79b0dd5886314a87ae6407269eb59f98 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/79b0dd5886314a87ae6407269eb59f98 2024-12-13T21:30:30,881 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/B of 
cc66b42faaed28a8693a712966f73789 into a9beb0cb891e4ace9861f20b25009ec0(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:30,881 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:30,881 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/B, priority=13, startTime=1734125430791; duration=0sec 2024-12-13T21:30:30,881 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:30,881 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:B 2024-12-13T21:30:30,881 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:30,884 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:30,884 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/C is initiating minor compaction (all files) 2024-12-13T21:30:30,884 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/C in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:30,884 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/aba6b079b2534447a90ab67c2c7636f9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/a4be1c4195d4410182fd51caefc018ca, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8c532a692517461590cadee3e189bc88] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=35.3 K 2024-12-13T21:30:30,885 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/A of cc66b42faaed28a8693a712966f73789 into 79b0dd5886314a87ae6407269eb59f98(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
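The RegionTooBusyException warnings that recur throughout this section come from the region server rejecting writes while the region's memstore is above its blocking size; the stack traces consistently point at HRegion.checkResources. Below is a simplified, hypothetical reconstruction of that check in plain Java: the 128 KB flush size and 4x block multiplier are assumptions chosen only so the arithmetic reproduces the "Over memstore limit=512.0 K" figure seen in the log, and the field and helper names are invented for illustration, not taken from HBase source or this run's configuration.

import java.io.IOException;

// Hypothetical sketch of the memstore back-pressure check: once the region's
// memstore exceeds flushSize * blockMultiplier, new mutations are rejected
// (the real server throws RegionTooBusyException, an IOException subclass)
// and a flush is requested so the backlog can drain.
public class MemstoreBlockSketch {

    // Assumed values for illustration (normally hbase.hregion.memstore.flush.size
    // and hbase.hregion.memstore.block.multiplier): 128 KB * 4 = 512 KB.
    private final long memstoreFlushSize = 128L * 1024;
    private final long blockMultiplier = 4;
    private final long blockingMemstoreSize = memstoreFlushSize * blockMultiplier;

    private long memstoreDataSize; // bytes currently held in the region's memstore

    void checkResources(String regionName) throws IOException {
        if (memstoreDataSize > blockingMemstoreSize) {
            requestFlush(); // hypothetical helper standing in for the async flush request
            throw new IOException("Over memstore limit="
                + (blockingMemstoreSize / 1024) + ".0 K, regionName=" + regionName);
        }
    }

    private void requestFlush() {
        // Intentionally empty: the sketch only shows the control flow that leads to
        // the warnings and the subsequent "Flushing ... column families" entries.
    }
}

Clients typically see these rejections as retriable rather than fatal: the HBase client retries RegionTooBusyException with backoff, so in a run like this they usually surface as added write latency until the flushes above bring the memstore back under the limit.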
2024-12-13T21:30:30,885 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:30,885 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/A, priority=13, startTime=1734125430790; duration=0sec 2024-12-13T21:30:30,885 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:30,885 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting aba6b079b2534447a90ab67c2c7636f9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125428492 2024-12-13T21:30:30,885 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:A 2024-12-13T21:30:30,886 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a4be1c4195d4410182fd51caefc018ca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734125429289 2024-12-13T21:30:30,887 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c532a692517461590cadee3e189bc88, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734125429947 2024-12-13T21:30:30,911 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#C#compaction#20 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:30,913 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/42d84ac64f8b4e40a49e04bcf455fd5d is 50, key is test_row_0/C:col10/1734125430567/Put/seqid=0 2024-12-13T21:30:30,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741859_1035 (size=12207) 2024-12-13T21:30:30,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:30,938 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-13T21:30:30,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:30,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:30,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:30,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:30,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:30,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:30,944 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/42d84ac64f8b4e40a49e04bcf455fd5d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/42d84ac64f8b4e40a49e04bcf455fd5d 2024-12-13T21:30:30,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/7a1e30fe4c2641b89a9f10724a5cb670 is 50, key is test_row_0/A:col10/1734125430617/Put/seqid=0 2024-12-13T21:30:30,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,961 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/C of cc66b42faaed28a8693a712966f73789 into 42d84ac64f8b4e40a49e04bcf455fd5d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:30,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125490955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,961 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:30,961 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/C, priority=13, startTime=1734125430796; duration=0sec 2024-12-13T21:30:30,961 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:30,961 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:C 2024-12-13T21:30:30,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125490959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:30,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125490964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125490962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:30,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741860_1036 (size=12001) 2024-12-13T21:30:30,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/7a1e30fe4c2641b89a9f10724a5cb670 2024-12-13T21:30:30,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/37045db7b9c84c2dbfadb318d1e628ff is 50, key is test_row_0/B:col10/1734125430617/Put/seqid=0 2024-12-13T21:30:31,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741861_1037 (size=12001) 2024-12-13T21:30:31,048 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/37045db7b9c84c2dbfadb318d1e628ff 2024-12-13T21:30:31,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/0f0130ee5fdb46ccb9855b579d823711 is 50, key is test_row_0/C:col10/1734125430617/Put/seqid=0 2024-12-13T21:30:31,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125491063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125491064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125491071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125491071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741862_1038 (size=12001) 2024-12-13T21:30:31,092 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/0f0130ee5fdb46ccb9855b579d823711 2024-12-13T21:30:31,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/7a1e30fe4c2641b89a9f10724a5cb670 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/7a1e30fe4c2641b89a9f10724a5cb670 2024-12-13T21:30:31,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/7a1e30fe4c2641b89a9f10724a5cb670, entries=150, sequenceid=118, filesize=11.7 K 2024-12-13T21:30:31,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/37045db7b9c84c2dbfadb318d1e628ff as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/37045db7b9c84c2dbfadb318d1e628ff 2024-12-13T21:30:31,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/37045db7b9c84c2dbfadb318d1e628ff, entries=150, sequenceid=118, filesize=11.7 K 2024-12-13T21:30:31,138 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/0f0130ee5fdb46ccb9855b579d823711 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/0f0130ee5fdb46ccb9855b579d823711 2024-12-13T21:30:31,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/0f0130ee5fdb46ccb9855b579d823711, entries=150, sequenceid=118, filesize=11.7 K 2024-12-13T21:30:31,155 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for cc66b42faaed28a8693a712966f73789 in 216ms, sequenceid=118, compaction requested=false 2024-12-13T21:30:31,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:31,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:31,270 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-13T21:30:31,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:31,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:31,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:31,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:31,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:31,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:31,279 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/41c709ef665447858a589b52ff3d1131 is 50, key is test_row_0/A:col10/1734125430954/Put/seqid=0 2024-12-13T21:30:31,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125491308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125491308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125491312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125491312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741863_1039 (size=12101) 2024-12-13T21:30:31,321 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/41c709ef665447858a589b52ff3d1131 2024-12-13T21:30:31,342 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/9ebf28b2dfca42eca27ce7f401f9cb86 is 50, key is test_row_0/B:col10/1734125430954/Put/seqid=0 2024-12-13T21:30:31,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741864_1040 (size=12101) 2024-12-13T21:30:31,368 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/9ebf28b2dfca42eca27ce7f401f9cb86 2024-12-13T21:30:31,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/1b25e6415fe24d6785f84a92347eafc5 is 50, key is test_row_0/C:col10/1734125430954/Put/seqid=0 2024-12-13T21:30:31,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125491414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125491415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125491418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125491418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741865_1041 (size=12101) 2024-12-13T21:30:31,426 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/1b25e6415fe24d6785f84a92347eafc5 2024-12-13T21:30:31,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/41c709ef665447858a589b52ff3d1131 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/41c709ef665447858a589b52ff3d1131 2024-12-13T21:30:31,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/41c709ef665447858a589b52ff3d1131, entries=150, sequenceid=133, filesize=11.8 K 2024-12-13T21:30:31,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/9ebf28b2dfca42eca27ce7f401f9cb86 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/9ebf28b2dfca42eca27ce7f401f9cb86 2024-12-13T21:30:31,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/9ebf28b2dfca42eca27ce7f401f9cb86, entries=150, sequenceid=133, filesize=11.8 K 2024-12-13T21:30:31,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/1b25e6415fe24d6785f84a92347eafc5 as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1b25e6415fe24d6785f84a92347eafc5 2024-12-13T21:30:31,484 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1b25e6415fe24d6785f84a92347eafc5, entries=150, sequenceid=133, filesize=11.8 K 2024-12-13T21:30:31,485 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for cc66b42faaed28a8693a712966f73789 in 215ms, sequenceid=133, compaction requested=true 2024-12-13T21:30:31,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:31,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:30:31,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:31,486 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:31,486 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:31,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:30:31,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:31,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:30:31,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:31,488 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:31,488 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:31,488 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/A is initiating minor compaction (all files) 2024-12-13T21:30:31,488 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/B is initiating minor compaction (all files) 2024-12-13T21:30:31,488 INFO 
[RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/A in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:31,488 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/B in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:31,489 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/a9beb0cb891e4ace9861f20b25009ec0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/37045db7b9c84c2dbfadb318d1e628ff, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/9ebf28b2dfca42eca27ce7f401f9cb86] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=35.5 K 2024-12-13T21:30:31,489 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/79b0dd5886314a87ae6407269eb59f98, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/7a1e30fe4c2641b89a9f10724a5cb670, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/41c709ef665447858a589b52ff3d1131] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=35.5 K 2024-12-13T21:30:31,490 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a9beb0cb891e4ace9861f20b25009ec0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734125429947 2024-12-13T21:30:31,491 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79b0dd5886314a87ae6407269eb59f98, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734125429947 2024-12-13T21:30:31,491 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 37045db7b9c84c2dbfadb318d1e628ff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734125430605 2024-12-13T21:30:31,492 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ebf28b2dfca42eca27ce7f401f9cb86, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734125430954 2024-12-13T21:30:31,493 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a1e30fe4c2641b89a9f10724a5cb670, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734125430605 2024-12-13T21:30:31,494 DEBUG 
[RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41c709ef665447858a589b52ff3d1131, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734125430954 2024-12-13T21:30:31,513 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#B#compaction#27 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:31,513 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/6eeaa293a0a44e469bcebf0eeecb620b is 50, key is test_row_0/B:col10/1734125430954/Put/seqid=0 2024-12-13T21:30:31,517 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#A#compaction#28 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:31,518 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/d2140b6c38bc48cf88421c5e64197df8 is 50, key is test_row_0/A:col10/1734125430954/Put/seqid=0 2024-12-13T21:30:31,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741866_1042 (size=12409) 2024-12-13T21:30:31,553 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/6eeaa293a0a44e469bcebf0eeecb620b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/6eeaa293a0a44e469bcebf0eeecb620b 2024-12-13T21:30:31,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741867_1043 (size=12409) 2024-12-13T21:30:31,574 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/d2140b6c38bc48cf88421c5e64197df8 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/d2140b6c38bc48cf88421c5e64197df8 2024-12-13T21:30:31,579 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/B of cc66b42faaed28a8693a712966f73789 into 6eeaa293a0a44e469bcebf0eeecb620b(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:30:31,580 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:31,580 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/B, priority=13, startTime=1734125431486; duration=0sec 2024-12-13T21:30:31,580 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:31,580 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:B 2024-12-13T21:30:31,580 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:31,585 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:31,585 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/C is initiating minor compaction (all files) 2024-12-13T21:30:31,585 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/C in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:31,586 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/42d84ac64f8b4e40a49e04bcf455fd5d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/0f0130ee5fdb46ccb9855b579d823711, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1b25e6415fe24d6785f84a92347eafc5] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=35.5 K 2024-12-13T21:30:31,587 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 42d84ac64f8b4e40a49e04bcf455fd5d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734125429947 2024-12-13T21:30:31,588 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/A of cc66b42faaed28a8693a712966f73789 into d2140b6c38bc48cf88421c5e64197df8(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:30:31,588 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:31,588 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/A, priority=13, startTime=1734125431485; duration=0sec 2024-12-13T21:30:31,588 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:31,588 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:A 2024-12-13T21:30:31,590 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f0130ee5fdb46ccb9855b579d823711, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734125430605 2024-12-13T21:30:31,591 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b25e6415fe24d6785f84a92347eafc5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734125430954 2024-12-13T21:30:31,613 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#C#compaction#29 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:31,614 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/93c683220bee4983862b183770322871 is 50, key is test_row_0/C:col10/1734125430954/Put/seqid=0 2024-12-13T21:30:31,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:31,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-13T21:30:31,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:31,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:31,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:31,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:31,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:31,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:31,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741868_1044 (size=12409) 
2024-12-13T21:30:31,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/3730fd47b68b453a84e48268b22aaf4f is 50, key is test_row_0/A:col10/1734125431310/Put/seqid=0 2024-12-13T21:30:31,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125491640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125491640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125491643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125491645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741869_1045 (size=12151) 2024-12-13T21:30:31,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-13T21:30:31,697 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-13T21:30:31,700 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:30:31,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-13T21:30:31,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-13T21:30:31,707 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:30:31,709 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:30:31,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:30:31,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125491747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125491748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125491748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125491751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-13T21:30:31,863 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-13T21:30:31,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:31,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:31,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:31,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:30:31,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:31,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:31,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125491952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125491953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125491953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:31,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:31,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125491956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-13T21:30:32,018 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-13T21:30:32,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:32,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:32,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:32,019 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:32,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:32,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:32,048 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/93c683220bee4983862b183770322871 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/93c683220bee4983862b183770322871 2024-12-13T21:30:32,058 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/C of cc66b42faaed28a8693a712966f73789 into 93c683220bee4983862b183770322871(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:32,058 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:32,058 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/C, priority=13, startTime=1734125431487; duration=0sec 2024-12-13T21:30:32,059 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:32,059 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:C 2024-12-13T21:30:32,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/3730fd47b68b453a84e48268b22aaf4f 2024-12-13T21:30:32,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/85c4d8f4bc964ba7ada97d9c7ec83d4b is 50, key is test_row_0/B:col10/1734125431310/Put/seqid=0 2024-12-13T21:30:32,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741870_1046 (size=12151) 2024-12-13T21:30:32,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=159 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/85c4d8f4bc964ba7ada97d9c7ec83d4b 2024-12-13T21:30:32,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/35d91f5e558f4d2ca2a85c1098d91806 is 50, key is test_row_0/C:col10/1734125431310/Put/seqid=0 2024-12-13T21:30:32,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741871_1047 (size=12151) 2024-12-13T21:30:32,172 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-13T21:30:32,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:32,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:32,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:32,174 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:32,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:32,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:32,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:32,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:32,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125492258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125492258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:32,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125492260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:32,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125492262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-13T21:30:32,327 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-13T21:30:32,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:32,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:32,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:32,329 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:32,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:32,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:32,482 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-13T21:30:32,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:32,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:32,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:32,484 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:32,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:32,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:32,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/35d91f5e558f4d2ca2a85c1098d91806 2024-12-13T21:30:32,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/3730fd47b68b453a84e48268b22aaf4f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/3730fd47b68b453a84e48268b22aaf4f 2024-12-13T21:30:32,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/3730fd47b68b453a84e48268b22aaf4f, entries=150, sequenceid=159, filesize=11.9 K 2024-12-13T21:30:32,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/85c4d8f4bc964ba7ada97d9c7ec83d4b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/85c4d8f4bc964ba7ada97d9c7ec83d4b 2024-12-13T21:30:32,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/85c4d8f4bc964ba7ada97d9c7ec83d4b, entries=150, 
sequenceid=159, filesize=11.9 K 2024-12-13T21:30:32,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/35d91f5e558f4d2ca2a85c1098d91806 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/35d91f5e558f4d2ca2a85c1098d91806 2024-12-13T21:30:32,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/35d91f5e558f4d2ca2a85c1098d91806, entries=150, sequenceid=159, filesize=11.9 K 2024-12-13T21:30:32,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for cc66b42faaed28a8693a712966f73789 in 965ms, sequenceid=159, compaction requested=false 2024-12-13T21:30:32,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:32,637 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-13T21:30:32,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:32,638 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:30:32,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:32,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:32,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:32,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:32,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:32,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:32,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/4a409301fc1b4ce6a7d8e84dba4cf05b is 50, key is test_row_0/A:col10/1734125431626/Put/seqid=0 2024-12-13T21:30:32,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:32,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
as already flushing 2024-12-13T21:30:32,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741872_1048 (size=12151) 2024-12-13T21:30:32,666 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/4a409301fc1b4ce6a7d8e84dba4cf05b 2024-12-13T21:30:32,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/ce072c9edea342b687145d333f3ea2e7 is 50, key is test_row_0/B:col10/1734125431626/Put/seqid=0 2024-12-13T21:30:32,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741873_1049 (size=12151) 2024-12-13T21:30:32,713 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/ce072c9edea342b687145d333f3ea2e7 2024-12-13T21:30:32,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/5fe546c27d084242ab7becb55c0b5260 is 50, key is test_row_0/C:col10/1734125431626/Put/seqid=0 2024-12-13T21:30:32,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741874_1050 (size=12151) 2024-12-13T21:30:32,744 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/5fe546c27d084242ab7becb55c0b5260 2024-12-13T21:30:32,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:32,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125492747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/4a409301fc1b4ce6a7d8e84dba4cf05b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/4a409301fc1b4ce6a7d8e84dba4cf05b 2024-12-13T21:30:32,763 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/4a409301fc1b4ce6a7d8e84dba4cf05b, entries=150, sequenceid=172, filesize=11.9 K 2024-12-13T21:30:32,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:32,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125492761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:32,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:32,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125492762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125492762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/ce072c9edea342b687145d333f3ea2e7 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/ce072c9edea342b687145d333f3ea2e7 2024-12-13T21:30:32,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:32,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125492766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,774 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/ce072c9edea342b687145d333f3ea2e7, entries=150, sequenceid=172, filesize=11.9 K 2024-12-13T21:30:32,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/5fe546c27d084242ab7becb55c0b5260 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/5fe546c27d084242ab7becb55c0b5260 2024-12-13T21:30:32,786 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/5fe546c27d084242ab7becb55c0b5260, entries=150, sequenceid=172, filesize=11.9 K 2024-12-13T21:30:32,788 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for cc66b42faaed28a8693a712966f73789 in 150ms, sequenceid=172, compaction requested=true 2024-12-13T21:30:32,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:32,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:32,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-13T21:30:32,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-13T21:30:32,795 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-13T21:30:32,795 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0810 sec 2024-12-13T21:30:32,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.0950 sec 2024-12-13T21:30:32,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-13T21:30:32,808 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-13T21:30:32,810 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:30:32,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-13T21:30:32,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-13T21:30:32,813 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:30:32,814 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:30:32,814 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:30:32,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:32,856 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-13T21:30:32,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:32,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:32,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:32,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-13T21:30:32,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:32,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:32,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/a0f1d276bf244d76b167a605f83db961 is 50, key is test_row_0/A:col10/1734125432854/Put/seqid=0 2024-12-13T21:30:32,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741875_1051 (size=12151) 2024-12-13T21:30:32,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/a0f1d276bf244d76b167a605f83db961 2024-12-13T21:30:32,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:32,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125492897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,902 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/ce0669b387c74f6c9c72b5a9ec2aa96f is 50, key is test_row_0/B:col10/1734125432854/Put/seqid=0 2024-12-13T21:30:32,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-13T21:30:32,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741876_1052 (size=12151) 2024-12-13T21:30:32,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/ce0669b387c74f6c9c72b5a9ec2aa96f 2024-12-13T21:30:32,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/f9d79e486b094ffcbbd024e5595df534 is 50, key is test_row_0/C:col10/1734125432854/Put/seqid=0 2024-12-13T21:30:32,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741877_1053 (size=12151) 2024-12-13T21:30:32,967 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:32,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/f9d79e486b094ffcbbd024e5595df534 2024-12-13T21:30:32,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-13T21:30:32,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:32,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:32,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:32,968 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:32,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:30:32,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:30:32,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/a0f1d276bf244d76b167a605f83db961 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/a0f1d276bf244d76b167a605f83db961 2024-12-13T21:30:32,990 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/a0f1d276bf244d76b167a605f83db961, entries=150, sequenceid=197, filesize=11.9 K 2024-12-13T21:30:32,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/ce0669b387c74f6c9c72b5a9ec2aa96f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/ce0669b387c74f6c9c72b5a9ec2aa96f 2024-12-13T21:30:33,001 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/ce0669b387c74f6c9c72b5a9ec2aa96f, entries=150, sequenceid=197, filesize=11.9 K 2024-12-13T21:30:33,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/f9d79e486b094ffcbbd024e5595df534 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/f9d79e486b094ffcbbd024e5595df534 2024-12-13T21:30:33,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:33,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125493003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:33,012 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/f9d79e486b094ffcbbd024e5595df534, entries=150, sequenceid=197, filesize=11.9 K 2024-12-13T21:30:33,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for cc66b42faaed28a8693a712966f73789 in 157ms, sequenceid=197, compaction requested=true 2024-12-13T21:30:33,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:33,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:30:33,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:33,014 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:30:33,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:30:33,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:33,014 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:30:33,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:30:33,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:33,017 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 
files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:30:33,017 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/B is initiating minor compaction (all files) 2024-12-13T21:30:33,018 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/B in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:33,018 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/6eeaa293a0a44e469bcebf0eeecb620b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/85c4d8f4bc964ba7ada97d9c7ec83d4b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/ce072c9edea342b687145d333f3ea2e7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/ce0669b387c74f6c9c72b5a9ec2aa96f] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=47.7 K 2024-12-13T21:30:33,019 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:30:33,019 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/A is initiating minor compaction (all files) 2024-12-13T21:30:33,019 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/A in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:33,019 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/d2140b6c38bc48cf88421c5e64197df8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/3730fd47b68b453a84e48268b22aaf4f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/4a409301fc1b4ce6a7d8e84dba4cf05b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/a0f1d276bf244d76b167a605f83db961] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=47.7 K 2024-12-13T21:30:33,020 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 6eeaa293a0a44e469bcebf0eeecb620b, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734125430954 2024-12-13T21:30:33,020 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2140b6c38bc48cf88421c5e64197df8, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734125430954 2024-12-13T21:30:33,021 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3730fd47b68b453a84e48268b22aaf4f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1734125431305 2024-12-13T21:30:33,021 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 85c4d8f4bc964ba7ada97d9c7ec83d4b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1734125431305 2024-12-13T21:30:33,022 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting ce072c9edea342b687145d333f3ea2e7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734125431626 2024-12-13T21:30:33,022 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a409301fc1b4ce6a7d8e84dba4cf05b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734125431626 2024-12-13T21:30:33,023 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting ce0669b387c74f6c9c72b5a9ec2aa96f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734125432711 2024-12-13T21:30:33,024 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0f1d276bf244d76b167a605f83db961, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734125432711 2024-12-13T21:30:33,052 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#A#compaction#39 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:33,053 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/b8669f1a155445aa899424c9efccd830 is 50, key is test_row_0/A:col10/1734125432854/Put/seqid=0 2024-12-13T21:30:33,058 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#B#compaction#40 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:33,059 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/33cba21b846847a186991789f26b740b is 50, key is test_row_0/B:col10/1734125432854/Put/seqid=0 2024-12-13T21:30:33,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741878_1054 (size=12595) 2024-12-13T21:30:33,081 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/b8669f1a155445aa899424c9efccd830 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/b8669f1a155445aa899424c9efccd830 2024-12-13T21:30:33,092 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc66b42faaed28a8693a712966f73789/A of cc66b42faaed28a8693a712966f73789 into b8669f1a155445aa899424c9efccd830(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:30:33,092 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:33,092 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/A, priority=12, startTime=1734125433013; duration=0sec 2024-12-13T21:30:33,092 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:33,092 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:A 2024-12-13T21:30:33,093 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:30:33,095 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:30:33,096 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/C is initiating minor compaction (all files) 2024-12-13T21:30:33,096 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/C in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:33,097 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/93c683220bee4983862b183770322871, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/35d91f5e558f4d2ca2a85c1098d91806, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/5fe546c27d084242ab7becb55c0b5260, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/f9d79e486b094ffcbbd024e5595df534] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=47.7 K 2024-12-13T21:30:33,098 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93c683220bee4983862b183770322871, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734125430954 2024-12-13T21:30:33,099 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35d91f5e558f4d2ca2a85c1098d91806, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1734125431305 2024-12-13T21:30:33,100 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fe546c27d084242ab7becb55c0b5260, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734125431626 2024-12-13T21:30:33,100 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9d79e486b094ffcbbd024e5595df534, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734125432711 2024-12-13T21:30:33,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741879_1055 (size=12595) 2024-12-13T21:30:33,112 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/33cba21b846847a186991789f26b740b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/33cba21b846847a186991789f26b740b 2024-12-13T21:30:33,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-13T21:30:33,121 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:33,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-13T21:30:33,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:33,122 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-13T21:30:33,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:33,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:33,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:33,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:33,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:33,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:33,124 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc66b42faaed28a8693a712966f73789/B of cc66b42faaed28a8693a712966f73789 into 33cba21b846847a186991789f26b740b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:33,125 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:33,125 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/B, priority=12, startTime=1734125433014; duration=0sec 2024-12-13T21:30:33,125 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:33,125 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:B 2024-12-13T21:30:33,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/ee2c3ae622af40268def7c7036fbae3f is 50, key is test_row_1/A:col10/1734125432888/Put/seqid=0 2024-12-13T21:30:33,159 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#C#compaction#42 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:33,160 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/160b33184d2445f7a76af6a7ea73fb05 is 50, key is test_row_0/C:col10/1734125432854/Put/seqid=0 2024-12-13T21:30:33,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741880_1056 (size=9757) 2024-12-13T21:30:33,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741881_1057 (size=12595) 2024-12-13T21:30:33,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:33,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:33,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:33,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125493317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:33,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-13T21:30:33,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:33,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125493420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:33,598 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/ee2c3ae622af40268def7c7036fbae3f 2024-12-13T21:30:33,610 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/160b33184d2445f7a76af6a7ea73fb05 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/160b33184d2445f7a76af6a7ea73fb05 2024-12-13T21:30:33,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/77d92a7d49b34883aa1523005cf8459c is 50, key is test_row_1/B:col10/1734125432888/Put/seqid=0 2024-12-13T21:30:33,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:33,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125493625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:33,629 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc66b42faaed28a8693a712966f73789/C of cc66b42faaed28a8693a712966f73789 into 160b33184d2445f7a76af6a7ea73fb05(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:33,629 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:33,629 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/C, priority=12, startTime=1734125433015; duration=0sec 2024-12-13T21:30:33,629 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:33,629 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:C 2024-12-13T21:30:33,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741882_1058 (size=9757) 2024-12-13T21:30:33,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:33,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125493771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:33,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:33,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125493772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:33,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:33,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125493774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:33,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:33,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125493776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:33,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-13T21:30:33,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:33,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125493927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:34,038 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/77d92a7d49b34883aa1523005cf8459c 2024-12-13T21:30:34,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/8359d1745d824f1ebf43983f83e5a4b2 is 50, key is test_row_1/C:col10/1734125432888/Put/seqid=0 2024-12-13T21:30:34,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741883_1059 (size=9757) 2024-12-13T21:30:34,076 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/8359d1745d824f1ebf43983f83e5a4b2 2024-12-13T21:30:34,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/ee2c3ae622af40268def7c7036fbae3f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/ee2c3ae622af40268def7c7036fbae3f 2024-12-13T21:30:34,093 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/ee2c3ae622af40268def7c7036fbae3f, entries=100, sequenceid=210, filesize=9.5 K 2024-12-13T21:30:34,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/77d92a7d49b34883aa1523005cf8459c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/77d92a7d49b34883aa1523005cf8459c 2024-12-13T21:30:34,106 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/77d92a7d49b34883aa1523005cf8459c, entries=100, sequenceid=210, filesize=9.5 K 2024-12-13T21:30:34,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/8359d1745d824f1ebf43983f83e5a4b2 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8359d1745d824f1ebf43983f83e5a4b2 2024-12-13T21:30:34,120 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8359d1745d824f1ebf43983f83e5a4b2, entries=100, sequenceid=210, filesize=9.5 K 2024-12-13T21:30:34,123 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for cc66b42faaed28a8693a712966f73789 in 1001ms, sequenceid=210, compaction requested=false 2024-12-13T21:30:34,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:34,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:34,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-13T21:30:34,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-13T21:30:34,130 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-13T21:30:34,130 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3130 sec 2024-12-13T21:30:34,133 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.3210 sec 2024-12-13T21:30:34,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:34,436 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-13T21:30:34,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:34,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:34,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:34,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:34,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:34,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:34,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/98c4ad085e9d4df486f34fcc0ff55e11 is 50, key is test_row_0/A:col10/1734125433298/Put/seqid=0 2024-12-13T21:30:34,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741884_1060 (size=14541) 2024-12-13T21:30:34,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/98c4ad085e9d4df486f34fcc0ff55e11 2024-12-13T21:30:34,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/93f13f061f764663a893ce670c220dcf is 50, key is test_row_0/B:col10/1734125433298/Put/seqid=0 2024-12-13T21:30:34,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:34,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125494476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:34,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741885_1061 (size=12151) 2024-12-13T21:30:34,490 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/93f13f061f764663a893ce670c220dcf 2024-12-13T21:30:34,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/e384c4131f944028a25fa03ca7cb9795 is 50, key is test_row_0/C:col10/1734125433298/Put/seqid=0 2024-12-13T21:30:34,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741886_1062 (size=12151) 2024-12-13T21:30:34,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:34,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125494578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:34,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:34,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125494781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:34,914 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/e384c4131f944028a25fa03ca7cb9795 2024-12-13T21:30:34,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-13T21:30:34,919 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-13T21:30:34,921 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:30:34,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-13T21:30:34,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-13T21:30:34,925 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:30:34,927 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:30:34,927 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:30:34,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/98c4ad085e9d4df486f34fcc0ff55e11 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/98c4ad085e9d4df486f34fcc0ff55e11 2024-12-13T21:30:34,942 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/98c4ad085e9d4df486f34fcc0ff55e11, entries=200, sequenceid=237, filesize=14.2 K 2024-12-13T21:30:34,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/93f13f061f764663a893ce670c220dcf as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/93f13f061f764663a893ce670c220dcf 2024-12-13T21:30:34,951 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/93f13f061f764663a893ce670c220dcf, entries=150, sequenceid=237, filesize=11.9 K 2024-12-13T21:30:34,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/e384c4131f944028a25fa03ca7cb9795 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e384c4131f944028a25fa03ca7cb9795 2024-12-13T21:30:34,961 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e384c4131f944028a25fa03ca7cb9795, entries=150, sequenceid=237, filesize=11.9 K 2024-12-13T21:30:34,964 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for cc66b42faaed28a8693a712966f73789 in 529ms, sequenceid=237, compaction requested=true 2024-12-13T21:30:34,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:34,965 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:34,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:30:34,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:34,966 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:34,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:30:34,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-12-13T21:30:34,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:30:34,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:34,967 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36893 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:34,967 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/A is initiating minor compaction (all files) 2024-12-13T21:30:34,967 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/A in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:34,967 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/b8669f1a155445aa899424c9efccd830, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/ee2c3ae622af40268def7c7036fbae3f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/98c4ad085e9d4df486f34fcc0ff55e11] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=36.0 K 2024-12-13T21:30:34,967 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:34,968 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/B is initiating minor compaction (all files) 2024-12-13T21:30:34,968 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/B in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:34,968 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/33cba21b846847a186991789f26b740b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/77d92a7d49b34883aa1523005cf8459c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/93f13f061f764663a893ce670c220dcf] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=33.7 K 2024-12-13T21:30:34,968 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8669f1a155445aa899424c9efccd830, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734125432711 2024-12-13T21:30:34,968 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 33cba21b846847a186991789f26b740b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734125432711 2024-12-13T21:30:34,969 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 77d92a7d49b34883aa1523005cf8459c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734125432888 2024-12-13T21:30:34,969 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee2c3ae622af40268def7c7036fbae3f, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734125432888 2024-12-13T21:30:34,970 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98c4ad085e9d4df486f34fcc0ff55e11, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1734125433298 2024-12-13T21:30:34,971 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 93f13f061f764663a893ce670c220dcf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1734125433298 2024-12-13T21:30:34,985 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#B#compaction#48 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:34,986 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/21e64d47416d4c14a27842c43cfac3f9 is 50, key is test_row_0/B:col10/1734125433298/Put/seqid=0 2024-12-13T21:30:34,997 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#A#compaction#49 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:34,998 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/813673e8e5494c6f93e281b1d9718c5c is 50, key is test_row_0/A:col10/1734125433298/Put/seqid=0 2024-12-13T21:30:35,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741887_1063 (size=12697) 2024-12-13T21:30:35,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741888_1064 (size=12697) 2024-12-13T21:30:35,024 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/21e64d47416d4c14a27842c43cfac3f9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/21e64d47416d4c14a27842c43cfac3f9 2024-12-13T21:30:35,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-13T21:30:35,035 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/813673e8e5494c6f93e281b1d9718c5c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/813673e8e5494c6f93e281b1d9718c5c 2024-12-13T21:30:35,038 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/B of cc66b42faaed28a8693a712966f73789 into 21e64d47416d4c14a27842c43cfac3f9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:30:35,039 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:35,039 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/B, priority=13, startTime=1734125434965; duration=0sec 2024-12-13T21:30:35,039 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:35,039 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:B 2024-12-13T21:30:35,040 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:35,041 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:35,041 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/C is initiating minor compaction (all files) 2024-12-13T21:30:35,042 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/C in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:35,042 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/160b33184d2445f7a76af6a7ea73fb05, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8359d1745d824f1ebf43983f83e5a4b2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e384c4131f944028a25fa03ca7cb9795] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=33.7 K 2024-12-13T21:30:35,042 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 160b33184d2445f7a76af6a7ea73fb05, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734125432711 2024-12-13T21:30:35,043 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 8359d1745d824f1ebf43983f83e5a4b2, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734125432888 2024-12-13T21:30:35,044 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting e384c4131f944028a25fa03ca7cb9795, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1734125433298 2024-12-13T21:30:35,047 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in cc66b42faaed28a8693a712966f73789/A of cc66b42faaed28a8693a712966f73789 into 813673e8e5494c6f93e281b1d9718c5c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:35,047 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:35,047 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/A, priority=13, startTime=1734125434965; duration=0sec 2024-12-13T21:30:35,047 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:35,047 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:A 2024-12-13T21:30:35,064 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#C#compaction#50 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:35,064 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/ad9be2d299a24acea784456da2a72b44 is 50, key is test_row_0/C:col10/1734125433298/Put/seqid=0 2024-12-13T21:30:35,080 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:35,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-13T21:30:35,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:35,082 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-13T21:30:35,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:35,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:35,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:35,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:35,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:35,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:35,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:35,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
as already flushing 2024-12-13T21:30:35,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/883a9fb9cbee4110beb670a0ce3e58fc is 50, key is test_row_0/A:col10/1734125434469/Put/seqid=0 2024-12-13T21:30:35,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741889_1065 (size=12697) 2024-12-13T21:30:35,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741890_1066 (size=12151) 2024-12-13T21:30:35,123 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/883a9fb9cbee4110beb670a0ce3e58fc 2024-12-13T21:30:35,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/a53e39b2c4934548b4eaf3defe758339 is 50, key is test_row_0/B:col10/1734125434469/Put/seqid=0 2024-12-13T21:30:35,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741891_1067 (size=12151) 2024-12-13T21:30:35,176 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/a53e39b2c4934548b4eaf3defe758339 2024-12-13T21:30:35,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:35,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125495177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:35,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/99ecf9940b204bd5a1d96d78670a6151 is 50, key is test_row_0/C:col10/1734125434469/Put/seqid=0 2024-12-13T21:30:35,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741892_1068 (size=12151) 2024-12-13T21:30:35,210 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/99ecf9940b204bd5a1d96d78670a6151 2024-12-13T21:30:35,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/883a9fb9cbee4110beb670a0ce3e58fc as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/883a9fb9cbee4110beb670a0ce3e58fc 2024-12-13T21:30:35,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-13T21:30:35,231 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/883a9fb9cbee4110beb670a0ce3e58fc, entries=150, sequenceid=249, filesize=11.9 K 2024-12-13T21:30:35,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/a53e39b2c4934548b4eaf3defe758339 as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/a53e39b2c4934548b4eaf3defe758339 2024-12-13T21:30:35,241 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/a53e39b2c4934548b4eaf3defe758339, entries=150, sequenceid=249, filesize=11.9 K 2024-12-13T21:30:35,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/99ecf9940b204bd5a1d96d78670a6151 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/99ecf9940b204bd5a1d96d78670a6151 2024-12-13T21:30:35,251 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/99ecf9940b204bd5a1d96d78670a6151, entries=150, sequenceid=249, filesize=11.9 K 2024-12-13T21:30:35,253 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for cc66b42faaed28a8693a712966f73789 in 172ms, sequenceid=249, compaction requested=false 2024-12-13T21:30:35,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:35,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
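The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") come from HRegion.checkResources: a put is rejected once the region's memstore grows past the configured flush size multiplied by the block multiplier, and writes resume after the flush commits, as it does here. Below is a minimal sketch of the two settings involved; the values are assumptions chosen only to reproduce a 512 K limit, since the test's actual configuration is not visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (assumed value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Reject writes with RegionTooBusyException once the memstore exceeds
    // flush.size * multiplier (128 KB * 4 = 512 KB with these values).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block above " + blockingLimit + " bytes");
  }
}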
2024-12-13T21:30:35,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-13T21:30:35,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-13T21:30:35,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-13T21:30:35,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 328 msec 2024-12-13T21:30:35,260 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 337 msec 2024-12-13T21:30:35,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:35,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-13T21:30:35,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:35,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:35,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:35,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:35,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:35,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:35,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/fc46f3bf9b934a0eb369bc496125558e is 50, key is test_row_0/A:col10/1734125435286/Put/seqid=0 2024-12-13T21:30:35,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741893_1069 (size=14741) 2024-12-13T21:30:35,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:35,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125495310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:35,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:35,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125495413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:35,516 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/ad9be2d299a24acea784456da2a72b44 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ad9be2d299a24acea784456da2a72b44 2024-12-13T21:30:35,528 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/C of cc66b42faaed28a8693a712966f73789 into ad9be2d299a24acea784456da2a72b44(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
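The compactions of stores A and C above were selected automatically by the region server's compaction policy and throttled by the PressureAwareThroughputController (50 MB/s total limit). For comparison, a compaction can also be requested explicitly from a client; the sketch below is illustrative and assumes a reachable cluster with default client configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.majorCompact(table); // queue a major compaction for every region
      // Poll until the region server reports no compaction in progress.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
    }
  }
}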
2024-12-13T21:30:35,528 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:35,528 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/C, priority=13, startTime=1734125434966; duration=0sec 2024-12-13T21:30:35,528 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:35,528 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:C 2024-12-13T21:30:35,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-13T21:30:35,530 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-13T21:30:35,532 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:30:35,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-13T21:30:35,534 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:30:35,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T21:30:35,536 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:30:35,536 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:30:35,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:35,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125495617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:35,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T21:30:35,689 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:35,690 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-13T21:30:35,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:35,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:35,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:35,690 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
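The pid=21 flush callable fails above because the region is already flushing; meanwhile the RpcRetryingCallerImpl entries further down (tries=6, retries=16, started ~4 s ago) show the test's writers retrying their puts after each RegionTooBusyException. The knobs governing that retry loop are client-side configuration; the sketch below is illustrative, with values chosen only as examples rather than the test's actual settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ClientRetryExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16);           // max retries per operation
    conf.setLong("hbase.client.pause", 100L);                  // base backoff between tries, ms
    conf.setLong("hbase.client.operation.timeout", 60_000L);   // overall cap per operation, ms
    conf.setLong("hbase.rpc.timeout", 10_000L);                // per-RPC timeout, ms
    // A Connection built from this conf keeps retrying retriable exceptions
    // such as RegionTooBusyException with increasing backoff until either
    // the retry count or the operation timeout is exhausted.
  }
}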
2024-12-13T21:30:35,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:35,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:35,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/fc46f3bf9b934a0eb369bc496125558e 2024-12-13T21:30:35,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/7fe62d5e6d3a4dd5b45e3b31f54b93e1 is 50, key is test_row_0/B:col10/1734125435286/Put/seqid=0 2024-12-13T21:30:35,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741894_1070 (size=12301) 2024-12-13T21:30:35,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:35,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125495782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:35,784 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:30:35,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:35,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125495786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:35,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:35,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125495787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:35,788 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:30:35,789 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:30:35,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:35,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125495792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:35,794 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4150 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:30:35,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T21:30:35,845 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:35,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-13T21:30:35,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 
{event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:35,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:35,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:35,845 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:35,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
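
The RegionTooBusyException entries above come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking limit (reported here as 512.0 K). That limit is derived from the memstore flush size and the block multiplier. The sketch below shows the two standard region-server settings involved; the class name and the concrete values (128 KB flush size, multiplier 4) are illustrative assumptions, not values taken from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative values only (not taken from this test). Both keys are region-server
    // settings, normally placed in hbase-site.xml rather than set programmatically.
    public final class MemstoreLimitExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size at which a region flush is triggered (assumed 128 KB here).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier -- 512 KB with these assumed values.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore limit (bytes) = " + blockingLimit);
      }
    }
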
2024-12-13T21:30:35,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:35,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:35,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125495920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:35,998 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:35,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-13T21:30:35,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:35,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:35,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:35,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
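
The pid=21 entries show the master dispatching FlushRegionCallable to the region server, which fails with "Unable to complete flush" because the region is already flushing, after which the master re-dispatches the remote procedure. For reference, a hedged sketch of requesting such a table flush through the client Admin API is below; whether this test drives the flush through Admin.flush is an assumption, as is the class name.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch: requesting a table flush from a client. The master turns this into a
    // flush procedure and dispatches FlushRegionCallable to the region servers,
    // which is the pid=20/21 activity visible in the surrounding log entries.
    public final class FlushRequest {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // The client polls the master until the flush procedure finishes
          // (the "Checking to see if procedure is done pid=20" entries above).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
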
2024-12-13T21:30:35,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:36,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:36,138 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/7fe62d5e6d3a4dd5b45e3b31f54b93e1 2024-12-13T21:30:36,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T21:30:36,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/e9488cb704964ddbaebd250395afc725 is 50, key is test_row_0/C:col10/1734125435286/Put/seqid=0 2024-12-13T21:30:36,162 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:36,162 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-13T21:30:36,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:36,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:36,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:36,163 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
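
The CallRunner and RpcRetryingCallerImpl entries above show puts against rows like test_row_0 (family A, qualifier col10) being rejected with RegionTooBusyException and retried on the client side. A minimal sketch of a writer in that spirit follows; only the table name, row key, family and qualifier are taken from the log, while the class name, cell value, and explicit retry settings are assumptions for illustration.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch of a writer in the spirit of the AcidGuaranteesTestTool writer seen above.
    // Table/row/family/qualifier come from the log; the value and retry settings are assumed.
    public final class BusyRegionWriter {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16); // matches "retries=16" reported by RpcRetryingCallerImpl
        conf.setLong("hbase.client.pause", 100);        // base backoff between retries, in ms (assumed)

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
          try {
            table.put(put); // retried internally; RegionTooBusyException is retriable
          } catch (IOException e) {
            // Once retries are exhausted the failure surfaces to the caller; its cause
            // chain then contains the RegionTooBusyException recorded in this log.
            System.err.println("put failed after retries: " + e);
          }
        }
      }
    }
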
2024-12-13T21:30:36,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:36,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:36,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741895_1071 (size=12301) 2024-12-13T21:30:36,316 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:36,316 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-13T21:30:36,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:36,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:36,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:36,317 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:36,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:36,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:36,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:36,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125496424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:36,470 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:36,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-13T21:30:36,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:36,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:36,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:36,471 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:36,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:36,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:36,570 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/e9488cb704964ddbaebd250395afc725 2024-12-13T21:30:36,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/fc46f3bf9b934a0eb369bc496125558e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fc46f3bf9b934a0eb369bc496125558e 2024-12-13T21:30:36,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fc46f3bf9b934a0eb369bc496125558e, entries=200, sequenceid=276, filesize=14.4 K 2024-12-13T21:30:36,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/7fe62d5e6d3a4dd5b45e3b31f54b93e1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7fe62d5e6d3a4dd5b45e3b31f54b93e1 2024-12-13T21:30:36,596 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7fe62d5e6d3a4dd5b45e3b31f54b93e1, entries=150, 
sequenceid=276, filesize=12.0 K 2024-12-13T21:30:36,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/e9488cb704964ddbaebd250395afc725 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e9488cb704964ddbaebd250395afc725 2024-12-13T21:30:36,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e9488cb704964ddbaebd250395afc725, entries=150, sequenceid=276, filesize=12.0 K 2024-12-13T21:30:36,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for cc66b42faaed28a8693a712966f73789 in 1319ms, sequenceid=276, compaction requested=true 2024-12-13T21:30:36,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:36,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:30:36,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:36,608 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:36,608 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:36,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:30:36,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:36,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:30:36,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:36,611 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:36,611 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/B is initiating minor compaction (all files) 2024-12-13T21:30:36,611 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 
39589 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:36,611 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/B in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:36,611 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/A is initiating minor compaction (all files) 2024-12-13T21:30:36,611 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/A in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:36,611 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/21e64d47416d4c14a27842c43cfac3f9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/a53e39b2c4934548b4eaf3defe758339, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7fe62d5e6d3a4dd5b45e3b31f54b93e1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=36.3 K 2024-12-13T21:30:36,611 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/813673e8e5494c6f93e281b1d9718c5c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/883a9fb9cbee4110beb670a0ce3e58fc, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fc46f3bf9b934a0eb369bc496125558e] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=38.7 K 2024-12-13T21:30:36,612 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 21e64d47416d4c14a27842c43cfac3f9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1734125433298 2024-12-13T21:30:36,612 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 813673e8e5494c6f93e281b1d9718c5c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1734125433298 2024-12-13T21:30:36,612 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a53e39b2c4934548b4eaf3defe758339, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1734125434438 2024-12-13T21:30:36,612 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 883a9fb9cbee4110beb670a0ce3e58fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1734125434438 2024-12-13T21:30:36,613 DEBUG 
[RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fe62d5e6d3a4dd5b45e3b31f54b93e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1734125435159 2024-12-13T21:30:36,613 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc46f3bf9b934a0eb369bc496125558e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1734125435159 2024-12-13T21:30:36,626 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#A#compaction#57 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:36,626 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:36,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-13T21:30:36,627 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/123a956c21da476c8aa48d0d3b72ff96 is 50, key is test_row_0/A:col10/1734125435286/Put/seqid=0 2024-12-13T21:30:36,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:36,627 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-13T21:30:36,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:36,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:36,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:36,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:36,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:36,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:36,637 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#B#compaction#58 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:36,637 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/542b62f5d1534825a676343ad49de6d8 is 50, key is test_row_0/B:col10/1734125435286/Put/seqid=0 2024-12-13T21:30:36,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T21:30:36,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/fc3ae0ae42a34013ab45b01768f3633d is 50, key is test_row_0/A:col10/1734125435304/Put/seqid=0 2024-12-13T21:30:36,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741896_1072 (size=12949) 2024-12-13T21:30:36,669 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/123a956c21da476c8aa48d0d3b72ff96 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/123a956c21da476c8aa48d0d3b72ff96 2024-12-13T21:30:36,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741898_1074 (size=12301) 2024-12-13T21:30:36,686 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/A of cc66b42faaed28a8693a712966f73789 into 123a956c21da476c8aa48d0d3b72ff96(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:30:36,687 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:36,687 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/A, priority=13, startTime=1734125436608; duration=0sec 2024-12-13T21:30:36,687 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/fc3ae0ae42a34013ab45b01768f3633d 2024-12-13T21:30:36,687 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:36,687 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:A 2024-12-13T21:30:36,687 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:36,689 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:36,689 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/C is initiating minor compaction (all files) 2024-12-13T21:30:36,689 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/C in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:36,690 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ad9be2d299a24acea784456da2a72b44, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/99ecf9940b204bd5a1d96d78670a6151, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e9488cb704964ddbaebd250395afc725] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=36.3 K 2024-12-13T21:30:36,690 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad9be2d299a24acea784456da2a72b44, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1734125433298 2024-12-13T21:30:36,693 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99ecf9940b204bd5a1d96d78670a6151, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1734125434438 2024-12-13T21:30:36,697 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9488cb704964ddbaebd250395afc725, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1734125435159 2024-12-13T21:30:36,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741897_1073 (size=12949) 2024-12-13T21:30:36,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/1c586708b882456c8fb18275e95729d5 is 50, key is test_row_0/B:col10/1734125435304/Put/seqid=0 2024-12-13T21:30:36,717 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/542b62f5d1534825a676343ad49de6d8 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/542b62f5d1534825a676343ad49de6d8 2024-12-13T21:30:36,721 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#C#compaction#61 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:36,722 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/5899e9a29f254878a13b49fc117d0f0d is 50, key is test_row_0/C:col10/1734125435286/Put/seqid=0 2024-12-13T21:30:36,724 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/B of cc66b42faaed28a8693a712966f73789 into 542b62f5d1534825a676343ad49de6d8(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:36,724 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:36,724 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/B, priority=13, startTime=1734125436608; duration=0sec 2024-12-13T21:30:36,724 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:36,724 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:B 2024-12-13T21:30:36,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741900_1076 (size=12949) 2024-12-13T21:30:36,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741899_1075 (size=12301) 2024-12-13T21:30:36,753 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/1c586708b882456c8fb18275e95729d5 2024-12-13T21:30:36,763 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/5899e9a29f254878a13b49fc117d0f0d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/5899e9a29f254878a13b49fc117d0f0d 2024-12-13T21:30:36,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/baade9d899f241b9af09a5546a7fbcae is 50, key is test_row_0/C:col10/1734125435304/Put/seqid=0 2024-12-13T21:30:36,774 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 
cc66b42faaed28a8693a712966f73789/C of cc66b42faaed28a8693a712966f73789 into 5899e9a29f254878a13b49fc117d0f0d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:36,774 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:36,775 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/C, priority=13, startTime=1734125436609; duration=0sec 2024-12-13T21:30:36,775 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:36,775 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:C 2024-12-13T21:30:36,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741901_1077 (size=12301) 2024-12-13T21:30:36,814 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/baade9d899f241b9af09a5546a7fbcae 2024-12-13T21:30:36,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/fc3ae0ae42a34013ab45b01768f3633d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fc3ae0ae42a34013ab45b01768f3633d 2024-12-13T21:30:36,830 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fc3ae0ae42a34013ab45b01768f3633d, entries=150, sequenceid=286, filesize=12.0 K 2024-12-13T21:30:36,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/1c586708b882456c8fb18275e95729d5 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/1c586708b882456c8fb18275e95729d5 2024-12-13T21:30:36,846 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/1c586708b882456c8fb18275e95729d5, 
entries=150, sequenceid=286, filesize=12.0 K 2024-12-13T21:30:36,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/baade9d899f241b9af09a5546a7fbcae as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/baade9d899f241b9af09a5546a7fbcae 2024-12-13T21:30:36,858 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/baade9d899f241b9af09a5546a7fbcae, entries=150, sequenceid=286, filesize=12.0 K 2024-12-13T21:30:36,860 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for cc66b42faaed28a8693a712966f73789 in 233ms, sequenceid=286, compaction requested=false 2024-12-13T21:30:36,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:36,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:36,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-13T21:30:36,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-13T21:30:36,864 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-13T21:30:36,864 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3260 sec 2024-12-13T21:30:36,866 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.3330 sec 2024-12-13T21:30:37,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:37,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-13T21:30:37,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:37,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:37,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:37,454 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:37,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:37,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:37,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/1683cdfbf03b4a8fb3d07eafbe161f27 is 50, key is test_row_0/A:col10/1734125437448/Put/seqid=0 2024-12-13T21:30:37,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741902_1078 (size=12301) 2024-12-13T21:30:37,482 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/1683cdfbf03b4a8fb3d07eafbe161f27 2024-12-13T21:30:37,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/7288437f174943e4a7b29a5a89958b7a is 50, key is test_row_0/B:col10/1734125437448/Put/seqid=0 2024-12-13T21:30:37,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741903_1079 (size=12301) 2024-12-13T21:30:37,511 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/7288437f174943e4a7b29a5a89958b7a 2024-12-13T21:30:37,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:37,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125497519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:37,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/4fcc7c0338004f4cad9bea1fb55b8bf1 is 50, key is test_row_0/C:col10/1734125437448/Put/seqid=0 2024-12-13T21:30:37,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741904_1080 (size=12301) 2024-12-13T21:30:37,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:37,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125497621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:37,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T21:30:37,640 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-13T21:30:37,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:30:37,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-13T21:30:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-13T21:30:37,643 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:30:37,643 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:30:37,643 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:30:37,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-13T21:30:37,795 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:37,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-13T21:30:37,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:37,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:37,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:37,796 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:37,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:30:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:37,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125497823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:37,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/4fcc7c0338004f4cad9bea1fb55b8bf1 2024-12-13T21:30:37,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/1683cdfbf03b4a8fb3d07eafbe161f27 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/1683cdfbf03b4a8fb3d07eafbe161f27 2024-12-13T21:30:37,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-13T21:30:37,948 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:37,948 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/1683cdfbf03b4a8fb3d07eafbe161f27, entries=150, sequenceid=300, filesize=12.0 K 2024-12-13T21:30:37,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-13T21:30:37,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:37,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:37,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:37,949 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:37,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:37,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/7288437f174943e4a7b29a5a89958b7a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7288437f174943e4a7b29a5a89958b7a 2024-12-13T21:30:37,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:30:37,958 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7288437f174943e4a7b29a5a89958b7a, entries=150, sequenceid=300, filesize=12.0 K 2024-12-13T21:30:37,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/4fcc7c0338004f4cad9bea1fb55b8bf1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4fcc7c0338004f4cad9bea1fb55b8bf1 2024-12-13T21:30:37,968 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4fcc7c0338004f4cad9bea1fb55b8bf1, entries=150, sequenceid=300, filesize=12.0 K 2024-12-13T21:30:37,969 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for cc66b42faaed28a8693a712966f73789 in 516ms, sequenceid=300, compaction requested=true 2024-12-13T21:30:37,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:37,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:30:37,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:37,969 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:37,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:30:37,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:37,969 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:37,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:30:37,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:37,970 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:37,970 DEBUG 
[RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:37,971 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/A is initiating minor compaction (all files) 2024-12-13T21:30:37,971 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/B is initiating minor compaction (all files) 2024-12-13T21:30:37,971 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/B in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:37,971 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/542b62f5d1534825a676343ad49de6d8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/1c586708b882456c8fb18275e95729d5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7288437f174943e4a7b29a5a89958b7a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=36.7 K 2024-12-13T21:30:37,971 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/A in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:37,971 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/123a956c21da476c8aa48d0d3b72ff96, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fc3ae0ae42a34013ab45b01768f3633d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/1683cdfbf03b4a8fb3d07eafbe161f27] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=36.7 K 2024-12-13T21:30:37,972 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 542b62f5d1534825a676343ad49de6d8, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1734125435159 2024-12-13T21:30:37,972 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 123a956c21da476c8aa48d0d3b72ff96, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1734125435159 2024-12-13T21:30:37,973 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c586708b882456c8fb18275e95729d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1734125435302 2024-12-13T21:30:37,973 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc3ae0ae42a34013ab45b01768f3633d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1734125435302 2024-12-13T21:30:37,973 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 7288437f174943e4a7b29a5a89958b7a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1734125437436 2024-12-13T21:30:37,974 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1683cdfbf03b4a8fb3d07eafbe161f27, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1734125437436 2024-12-13T21:30:37,993 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#A#compaction#66 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:37,994 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/0aa6762a430c4d178e6827ce46c41974 is 50, key is test_row_0/A:col10/1734125437448/Put/seqid=0 2024-12-13T21:30:38,001 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#B#compaction#67 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:38,002 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/9ce2d71a117d4803b8cd0dec1c5bea9b is 50, key is test_row_0/B:col10/1734125437448/Put/seqid=0 2024-12-13T21:30:38,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741905_1081 (size=13051) 2024-12-13T21:30:38,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741906_1082 (size=13051) 2024-12-13T21:30:38,024 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/0aa6762a430c4d178e6827ce46c41974 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/0aa6762a430c4d178e6827ce46c41974 2024-12-13T21:30:38,034 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/A of cc66b42faaed28a8693a712966f73789 into 0aa6762a430c4d178e6827ce46c41974(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:38,034 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:38,034 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/A, priority=13, startTime=1734125437969; duration=0sec 2024-12-13T21:30:38,034 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:38,034 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:A 2024-12-13T21:30:38,035 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:38,036 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:38,036 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/C is initiating minor compaction (all files) 2024-12-13T21:30:38,036 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/C in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:38,036 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/5899e9a29f254878a13b49fc117d0f0d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/baade9d899f241b9af09a5546a7fbcae, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4fcc7c0338004f4cad9bea1fb55b8bf1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=36.7 K 2024-12-13T21:30:38,037 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5899e9a29f254878a13b49fc117d0f0d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1734125435159 2024-12-13T21:30:38,037 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting baade9d899f241b9af09a5546a7fbcae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1734125435302 2024-12-13T21:30:38,037 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fcc7c0338004f4cad9bea1fb55b8bf1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1734125437436 2024-12-13T21:30:38,047 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#C#compaction#68 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:38,048 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/dffac29f815a4405bcf3cbd9b83cfa9c is 50, key is test_row_0/C:col10/1734125437448/Put/seqid=0 2024-12-13T21:30:38,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741907_1083 (size=13051) 2024-12-13T21:30:38,102 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:38,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-13T21:30:38,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:38,103 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-13T21:30:38,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:38,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:38,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:38,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:38,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:38,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:38,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/3020a5b9ff8c413491e14389c8bbc88c is 50, key is test_row_0/A:col10/1734125437513/Put/seqid=0 2024-12-13T21:30:38,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741908_1084 (size=12301) 2024-12-13T21:30:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:38,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:38,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125498166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:38,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-13T21:30:38,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:38,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125498270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:38,420 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/9ce2d71a117d4803b8cd0dec1c5bea9b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/9ce2d71a117d4803b8cd0dec1c5bea9b 2024-12-13T21:30:38,425 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/B of cc66b42faaed28a8693a712966f73789 into 9ce2d71a117d4803b8cd0dec1c5bea9b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:38,426 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:38,426 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/B, priority=13, startTime=1734125437969; duration=0sec 2024-12-13T21:30:38,426 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:38,426 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:B 2024-12-13T21:30:38,467 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/dffac29f815a4405bcf3cbd9b83cfa9c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/dffac29f815a4405bcf3cbd9b83cfa9c 2024-12-13T21:30:38,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:38,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125498473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:38,474 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/C of cc66b42faaed28a8693a712966f73789 into dffac29f815a4405bcf3cbd9b83cfa9c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
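The RegionTooBusyException warnings interleaved with the compaction output come from HRegion.checkResources(): once the region's combined memstore passes the blocking limit (512.0 K in this run), new mutations are rejected until a flush drains it, and the client's RpcRetryingCallerImpl retries with backoff. The blocking limit is the memstore flush size multiplied by the block multiplier, so the small 512 K figure indicates the test runs with a deliberately low flush size. A minimal configuration sketch using HBase's standard keys; the concrete values below are illustrative and not read from this run's site configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Blocking limit = flush size * block multiplier,
            // e.g. 128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K"
            // rejections in the log (illustrative values).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            // The client-side retry behaviour seen in the log
            // (tries=7, retries=16, exponential backoff) is governed by these keys.
            conf.setInt("hbase.client.retries.number", 16);
            conf.setLong("hbase.client.pause", 100L); // base backoff in ms

            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
            int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("memstore blocking limit = " + (flushSize * multiplier) + " bytes");
        }
    }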
2024-12-13T21:30:38,474 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:38,474 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/C, priority=13, startTime=1734125437969; duration=0sec 2024-12-13T21:30:38,474 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:38,474 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:C 2024-12-13T21:30:38,517 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/3020a5b9ff8c413491e14389c8bbc88c 2024-12-13T21:30:38,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/c9ca727a65914bb69dd8b3bf14cb8d74 is 50, key is test_row_0/B:col10/1734125437513/Put/seqid=0 2024-12-13T21:30:38,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741909_1085 (size=12301) 2024-12-13T21:30:38,535 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/c9ca727a65914bb69dd8b3bf14cb8d74 2024-12-13T21:30:38,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/d829ec4afbdb4ae08d8509c7b8a53ed1 is 50, key is test_row_0/C:col10/1734125437513/Put/seqid=0 2024-12-13T21:30:38,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741910_1086 (size=12301) 2024-12-13T21:30:38,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-13T21:30:38,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:38,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125498776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:39,015 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/d829ec4afbdb4ae08d8509c7b8a53ed1 2024-12-13T21:30:39,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/3020a5b9ff8c413491e14389c8bbc88c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/3020a5b9ff8c413491e14389c8bbc88c 2024-12-13T21:30:39,029 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/3020a5b9ff8c413491e14389c8bbc88c, entries=150, sequenceid=326, filesize=12.0 K 2024-12-13T21:30:39,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/c9ca727a65914bb69dd8b3bf14cb8d74 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c9ca727a65914bb69dd8b3bf14cb8d74 2024-12-13T21:30:39,041 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c9ca727a65914bb69dd8b3bf14cb8d74, entries=150, sequenceid=326, filesize=12.0 K 2024-12-13T21:30:39,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/d829ec4afbdb4ae08d8509c7b8a53ed1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/d829ec4afbdb4ae08d8509c7b8a53ed1 2024-12-13T21:30:39,052 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/d829ec4afbdb4ae08d8509c7b8a53ed1, entries=150, sequenceid=326, filesize=12.0 K 2024-12-13T21:30:39,053 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for cc66b42faaed28a8693a712966f73789 in 950ms, sequenceid=326, compaction requested=false 2024-12-13T21:30:39,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:39,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
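The region-level flush has now written ~147.60 KB across the three families in 950 ms; the lines that follow are the procedure framework reporting subprocedure pid=23 back to the master so the parent FlushTableProcedure (pid=22) can finish, which the client's HBaseAdmin table future later reports as completed. The client-side call that drives this path is a plain Admin flush; a minimal sketch, assuming default connection settings and the table name from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Blocks until the flush request completes; on this build the
                // master runs a FlushTableProcedure with one FlushRegionProcedure
                // per region, as seen for pid=22 / pid=23 in this log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }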
2024-12-13T21:30:39,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-13T21:30:39,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-13T21:30:39,057 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-13T21:30:39,058 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4130 sec 2024-12-13T21:30:39,059 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.4170 sec 2024-12-13T21:30:39,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:39,281 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:30:39,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:39,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:39,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:39,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:39,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:39,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:39,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/9bf69ef925fe42e1b340ce8fcb26b786 is 50, key is test_row_0/A:col10/1734125439280/Put/seqid=0 2024-12-13T21:30:39,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741911_1087 (size=14741) 2024-12-13T21:30:39,327 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/9bf69ef925fe42e1b340ce8fcb26b786 2024-12-13T21:30:39,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/fc9d6de9e272494e873464b92725a2ab is 50, key is test_row_0/B:col10/1734125439280/Put/seqid=0 2024-12-13T21:30:39,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:39,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125499354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:39,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741912_1088 (size=12301) 2024-12-13T21:30:39,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:39,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125499457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:39,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:39,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125499660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:39,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-13T21:30:39,747 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-13T21:30:39,752 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:30:39,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-13T21:30:39,754 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:30:39,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-13T21:30:39,755 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:30:39,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:30:39,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/fc9d6de9e272494e873464b92725a2ab 2024-12-13T21:30:39,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/0e60e1ec265545eabab309887b314415 is 50, key is test_row_0/C:col10/1734125439280/Put/seqid=0 2024-12-13T21:30:39,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:39,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34558 deadline: 1734125499795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:39,796 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:30:39,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741913_1089 (size=12301) 2024-12-13T21:30:39,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/0e60e1ec265545eabab309887b314415 2024-12-13T21:30:39,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:39,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34570 deadline: 1734125499803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:39,806 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8165 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:30:39,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/9bf69ef925fe42e1b340ce8fcb26b786 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/9bf69ef925fe42e1b340ce8fcb26b786 2024-12-13T21:30:39,812 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/9bf69ef925fe42e1b340ce8fcb26b786, entries=200, sequenceid=340, filesize=14.4 K 2024-12-13T21:30:39,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/fc9d6de9e272494e873464b92725a2ab as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/fc9d6de9e272494e873464b92725a2ab 2024-12-13T21:30:39,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:39,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34564 deadline: 1734125499818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:39,820 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:30:39,822 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/fc9d6de9e272494e873464b92725a2ab, entries=150, sequenceid=340, filesize=12.0 K 2024-12-13T21:30:39,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:39,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34552 deadline: 1734125499826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:39,827 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8187 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:30:39,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/0e60e1ec265545eabab309887b314415 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/0e60e1ec265545eabab309887b314415 2024-12-13T21:30:39,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/0e60e1ec265545eabab309887b314415, entries=150, sequenceid=340, filesize=12.0 K 2024-12-13T21:30:39,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for cc66b42faaed28a8693a712966f73789 in 555ms, sequenceid=340, compaction requested=true 2024-12-13T21:30:39,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:39,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:30:39,836 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:39,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:39,836 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:39,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:30:39,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:39,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:30:39,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:39,838 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:39,838 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/B is initiating minor compaction (all files) 2024-12-13T21:30:39,838 INFO 
[RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/B in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:39,838 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/9ce2d71a117d4803b8cd0dec1c5bea9b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c9ca727a65914bb69dd8b3bf14cb8d74, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/fc9d6de9e272494e873464b92725a2ab] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=36.8 K 2024-12-13T21:30:39,839 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ce2d71a117d4803b8cd0dec1c5bea9b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1734125437436 2024-12-13T21:30:39,840 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:39,840 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/A is initiating minor compaction (all files) 2024-12-13T21:30:39,840 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/A in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
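Aside: the compaction entries above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" and ExploringCompactionPolicy picking all 3 candidates) are driven by per-store file-count thresholds. Below is a minimal sketch of those knobs, assuming the standard hbase.hstore.* configuration keys; the values are illustrative and not taken from this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionTuning {
        public static Configuration example() {
            Configuration conf = HBaseConfiguration.create();
            // Minimum store files before a minor compaction is considered (illustrative value).
            conf.setInt("hbase.hstore.compaction.min", 3);
            // Upper bound on the number of files merged in one minor compaction (illustrative value).
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Writes to a store block once it accumulates this many files;
            // "16 blocking" in the log corresponds to this limit.
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            return conf;
        }
    }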
2024-12-13T21:30:39,840 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/0aa6762a430c4d178e6827ce46c41974, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/3020a5b9ff8c413491e14389c8bbc88c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/9bf69ef925fe42e1b340ce8fcb26b786] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=39.2 K 2024-12-13T21:30:39,841 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting c9ca727a65914bb69dd8b3bf14cb8d74, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1734125437511 2024-12-13T21:30:39,841 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0aa6762a430c4d178e6827ce46c41974, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1734125437436 2024-12-13T21:30:39,841 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3020a5b9ff8c413491e14389c8bbc88c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1734125437511 2024-12-13T21:30:39,842 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting fc9d6de9e272494e873464b92725a2ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734125438136 2024-12-13T21:30:39,842 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bf69ef925fe42e1b340ce8fcb26b786, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734125438136 2024-12-13T21:30:39,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-13T21:30:39,862 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#B#compaction#75 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:39,863 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/f1bace814fb147f4a7b038687034bb7b is 50, key is test_row_0/B:col10/1734125439280/Put/seqid=0 2024-12-13T21:30:39,869 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#A#compaction#76 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:39,869 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/6b1508fcdf544259aa062214f6f2d77f is 50, key is test_row_0/A:col10/1734125439280/Put/seqid=0 2024-12-13T21:30:39,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741914_1090 (size=13153) 2024-12-13T21:30:39,881 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/f1bace814fb147f4a7b038687034bb7b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/f1bace814fb147f4a7b038687034bb7b 2024-12-13T21:30:39,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741915_1091 (size=13153) 2024-12-13T21:30:39,900 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/B of cc66b42faaed28a8693a712966f73789 into f1bace814fb147f4a7b038687034bb7b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:39,901 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:39,901 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/B, priority=13, startTime=1734125439836; duration=0sec 2024-12-13T21:30:39,901 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:39,901 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:B 2024-12-13T21:30:39,901 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:39,902 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:39,902 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/C is initiating minor compaction (all files) 2024-12-13T21:30:39,902 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/C in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
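Aside: the PressureAwareThroughputController entries above ("average throughput is 3.28 MB/second ... total limit is 50.00 MB/second") come from the compaction throughput limiter. A hedged sketch of how that ceiling is typically configured follows; the key names are assumed to be the pressure-aware controller's usual bounds and the values are examples, not the settings of this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputExample {
        public static Configuration example() {
            Configuration conf = HBaseConfiguration.create();
            // Bounds (bytes/sec) between which compaction throughput is scaled with memstore
            // pressure; 50 MB/s here simply mirrors the "total limit" printed above.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 25L * 1024 * 1024);
            return conf;
        }
    }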
2024-12-13T21:30:39,903 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/dffac29f815a4405bcf3cbd9b83cfa9c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/d829ec4afbdb4ae08d8509c7b8a53ed1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/0e60e1ec265545eabab309887b314415] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=36.8 K 2024-12-13T21:30:39,904 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting dffac29f815a4405bcf3cbd9b83cfa9c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1734125437436 2024-12-13T21:30:39,904 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting d829ec4afbdb4ae08d8509c7b8a53ed1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1734125437511 2024-12-13T21:30:39,905 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e60e1ec265545eabab309887b314415, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734125438136 2024-12-13T21:30:39,906 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:39,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-13T21:30:39,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
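Aside: the pid=24/25 entries above are a master-driven FlushTableProcedure fanning a FlushRegionCallable out to the region server. On the client side that whole sequence is normally started with a single Admin flush call; a minimal sketch, assuming a reachable cluster configuration, with the table name taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Asks the master to flush every region of the table; it runs as a
                // FlushTableProcedure with per-region subprocedures, as seen above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }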
2024-12-13T21:30:39,908 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-13T21:30:39,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:39,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:39,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:39,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:39,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:39,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:39,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/9b60866015cc48b9889635117048b81f is 50, key is test_row_0/A:col10/1734125439352/Put/seqid=0 2024-12-13T21:30:39,932 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#C#compaction#78 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:39,933 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/6635f12916ba44c7865b987d33e61133 is 50, key is test_row_0/C:col10/1734125439280/Put/seqid=0 2024-12-13T21:30:39,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741917_1093 (size=13153) 2024-12-13T21:30:39,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741916_1092 (size=12301) 2024-12-13T21:30:39,959 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/9b60866015cc48b9889635117048b81f 2024-12-13T21:30:39,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:39,968 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/6635f12916ba44c7865b987d33e61133 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/6635f12916ba44c7865b987d33e61133 2024-12-13T21:30:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:39,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/c893e7982d514cb5bdcc77dcdcd02bd7 is 50, key is test_row_0/B:col10/1734125439352/Put/seqid=0 2024-12-13T21:30:39,980 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/C of cc66b42faaed28a8693a712966f73789 into 6635f12916ba44c7865b987d33e61133(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
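Aside: the store files flushed and compacted above are written with bloomFilter=true and appear in the Compactor entries as bloomtype=ROW, encoding=NONE, compression=NONE for each of the A, B and C families. The sketch below shows a table descriptor that would produce files with those attributes, using the standard HBase 2.x builder API; it is illustrative and not necessarily the exact schema the test tool creates.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableExample {
        public static void createTable(Admin admin) throws java.io.IOException {
            TableDescriptorBuilder builder =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
            for (String family : new String[] {"A", "B", "C"}) {
                builder.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                        .setBloomFilterType(BloomType.ROW) // matches bloomtype=ROW in the log
                        .build());
            }
            admin.createTable(builder.build());
        }
    }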
2024-12-13T21:30:39,980 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:39,980 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/C, priority=13, startTime=1734125439837; duration=0sec 2024-12-13T21:30:39,980 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:39,980 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:C 2024-12-13T21:30:39,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741918_1094 (size=12301) 2024-12-13T21:30:39,987 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/c893e7982d514cb5bdcc77dcdcd02bd7 2024-12-13T21:30:39,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125499993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:40,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/eef54cf2a1564b4a8b9b8367953fef25 is 50, key is test_row_0/C:col10/1734125439352/Put/seqid=0 2024-12-13T21:30:40,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741919_1095 (size=12301) 2024-12-13T21:30:40,007 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/eef54cf2a1564b4a8b9b8367953fef25 2024-12-13T21:30:40,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/9b60866015cc48b9889635117048b81f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/9b60866015cc48b9889635117048b81f 2024-12-13T21:30:40,018 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/9b60866015cc48b9889635117048b81f, entries=150, sequenceid=365, filesize=12.0 K 2024-12-13T21:30:40,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/c893e7982d514cb5bdcc77dcdcd02bd7 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c893e7982d514cb5bdcc77dcdcd02bd7 2024-12-13T21:30:40,025 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 
{event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c893e7982d514cb5bdcc77dcdcd02bd7, entries=150, sequenceid=365, filesize=12.0 K 2024-12-13T21:30:40,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/eef54cf2a1564b4a8b9b8367953fef25 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/eef54cf2a1564b4a8b9b8367953fef25 2024-12-13T21:30:40,037 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/eef54cf2a1564b4a8b9b8367953fef25, entries=150, sequenceid=365, filesize=12.0 K 2024-12-13T21:30:40,038 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for cc66b42faaed28a8693a712966f73789 in 130ms, sequenceid=365, compaction requested=false 2024-12-13T21:30:40,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:40,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
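Aside: the repeated "Over memstore limit=512.0 K" rejections above, interleaved with the MemStoreFlusher commits, are the region's write-blocking guard: once a region's memstore grows past the flush size times the block multiplier, mutations are bounced with RegionTooBusyException until the flush catches up. A hedged sketch of the two settings involved; the 128 KB flush size is only an assumption chosen so that 4 x 128 KB = 512 K matches the limit in the log, not a value read from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingExample {
        public static Configuration example() {
            Configuration conf = HBaseConfiguration.create();
            // Per-region memstore size that triggers a flush (assumed test-scale value).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
            // Multiplier above the flush size at which writes are rejected with
            // RegionTooBusyException; 4 * 128 KB = 512 K, the limit printed above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }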
2024-12-13T21:30:40,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-13T21:30:40,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-13T21:30:40,043 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-13T21:30:40,043 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 286 msec 2024-12-13T21:30:40,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 292 msec 2024-12-13T21:30:40,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-13T21:30:40,057 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-13T21:30:40,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:30:40,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-13T21:30:40,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-13T21:30:40,061 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:30:40,062 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:30:40,062 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:30:40,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-13T21:30:40,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:40,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:40,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:40,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:40,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:40,097 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:40,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:40,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/1a7180dc168a4e83996284b58de0bdb1 is 50, key is test_row_0/A:col10/1734125440095/Put/seqid=0 2024-12-13T21:30:40,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741920_1096 (size=14741) 2024-12-13T21:30:40,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:40,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125500156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:40,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-13T21:30:40,214 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:40,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-13T21:30:40,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
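Aside: on the client side the RegionTooBusyException above is not fatal. The RpcRetryingCallerImpl entries ("tries=7, retries=16, started=8176 ms ago") show HTable.put retrying with backoff until the region unblocks. A minimal sketch of a writer along the lines of the test's AtomicityWriter, with the retry knobs spelled out; the retry and pause values are illustrative, not the ones this run used.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriterExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.client.retries.number", 16); // illustrative retry budget
            conf.setLong("hbase.client.pause", 100);        // base backoff in ms (illustrative)
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_2"));
                // One cell per family, mirroring the A/B/C column families seen in the log.
                for (String family : new String[] {"A", "B", "C"}) {
                    put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                }
                // Retries on RegionTooBusyException happen inside the client; an exception only
                // surfaces here once the retry budget is exhausted.
                table.put(put);
            }
        }
    }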
2024-12-13T21:30:40,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:40,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:40,215 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:30:40,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:40,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125500261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:40,291 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/6b1508fcdf544259aa062214f6f2d77f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/6b1508fcdf544259aa062214f6f2d77f 2024-12-13T21:30:40,304 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/A of cc66b42faaed28a8693a712966f73789 into 6b1508fcdf544259aa062214f6f2d77f(size=12.8 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:40,304 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:40,304 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/A, priority=13, startTime=1734125439836; duration=0sec 2024-12-13T21:30:40,304 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:40,304 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:A 2024-12-13T21:30:40,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-13T21:30:40,376 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:40,377 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-13T21:30:40,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:40,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:40,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:40,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:30:40,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:40,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125500465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:40,531 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:40,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-13T21:30:40,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:40,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/1a7180dc168a4e83996284b58de0bdb1 2024-12-13T21:30:40,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:40,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:40,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/62101a04052e41c581a4abbaf727ee9e is 50, key is test_row_0/B:col10/1734125440095/Put/seqid=0 2024-12-13T21:30:40,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741921_1097 (size=12301) 2024-12-13T21:30:40,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-13T21:30:40,684 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:40,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-13T21:30:40,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:40,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:40,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:40,685 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:40,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125500769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:40,837 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:40,838 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-13T21:30:40,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:40,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
as already flushing 2024-12-13T21:30:40,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:40,838 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/62101a04052e41c581a4abbaf727ee9e 2024-12-13T21:30:40,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/1799590e6869488b98bea2727d1b0293 is 50, key is test_row_0/C:col10/1734125440095/Put/seqid=0 2024-12-13T21:30:40,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741922_1098 (size=12301) 2024-12-13T21:30:40,990 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:40,990 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-13T21:30:40,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:40,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
as already flushing 2024-12-13T21:30:40,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:40,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:40,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:41,143 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:41,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-13T21:30:41,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:41,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:41,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:41,144 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:41,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:41,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:41,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-13T21:30:41,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:41,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125501277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:41,296 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:41,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-13T21:30:41,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:41,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:41,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:41,296 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
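The WARN/DEBUG entries above show write backpressure: the region's memstore is over its blocking limit (512.0 K in this test configuration), so Mutate calls are rejected with RegionTooBusyException until the flush drains the memstore. The standard HBase client retries this internally; the sketch below only makes that retry visible with an explicit backoff loop, and assumes a client configured so the exception surfaces directly from Table.put. The table name, row, family, and qualifier come from the log; the retry count and backoff values are arbitrary.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 0; attempt < 10; attempt++) {
                    try {
                        table.put(put);
                        return; // accepted once the memstore drains below its limit
                    } catch (RegionTooBusyException tooBusy) {
                        Thread.sleep(backoffMs);          // region over its memstore limit: back off
                        backoffMs = Math.min(backoffMs * 2, 5000);
                    }
                }
            }
        }
    }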
2024-12-13T21:30:41,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:41,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:41,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/1799590e6869488b98bea2727d1b0293 2024-12-13T21:30:41,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/1a7180dc168a4e83996284b58de0bdb1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/1a7180dc168a4e83996284b58de0bdb1 2024-12-13T21:30:41,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/1a7180dc168a4e83996284b58de0bdb1, entries=200, sequenceid=379, filesize=14.4 K 2024-12-13T21:30:41,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/62101a04052e41c581a4abbaf727ee9e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/62101a04052e41c581a4abbaf727ee9e 2024-12-13T21:30:41,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/62101a04052e41c581a4abbaf727ee9e, entries=150, sequenceid=379, filesize=12.0 K 2024-12-13T21:30:41,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/1799590e6869488b98bea2727d1b0293 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1799590e6869488b98bea2727d1b0293 2024-12-13T21:30:41,394 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1799590e6869488b98bea2727d1b0293, entries=150, sequenceid=379, filesize=12.0 K 2024-12-13T21:30:41,395 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for cc66b42faaed28a8693a712966f73789 in 1299ms, sequenceid=379, compaction requested=true 2024-12-13T21:30:41,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:41,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
cc66b42faaed28a8693a712966f73789:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:30:41,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:41,395 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:41,395 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:41,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:30:41,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:41,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:30:41,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:41,397 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:41,397 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:41,397 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/A is initiating minor compaction (all files) 2024-12-13T21:30:41,397 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/B is initiating minor compaction (all files) 2024-12-13T21:30:41,397 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/A in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:41,397 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/B in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:41,397 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/6b1508fcdf544259aa062214f6f2d77f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/9b60866015cc48b9889635117048b81f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/1a7180dc168a4e83996284b58de0bdb1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=39.3 K 2024-12-13T21:30:41,397 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/f1bace814fb147f4a7b038687034bb7b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c893e7982d514cb5bdcc77dcdcd02bd7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/62101a04052e41c581a4abbaf727ee9e] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=36.9 K 2024-12-13T21:30:41,397 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b1508fcdf544259aa062214f6f2d77f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734125438136 2024-12-13T21:30:41,397 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting f1bace814fb147f4a7b038687034bb7b, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734125438136 2024-12-13T21:30:41,398 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b60866015cc48b9889635117048b81f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1734125439334 2024-12-13T21:30:41,398 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting c893e7982d514cb5bdcc77dcdcd02bd7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1734125439334 2024-12-13T21:30:41,398 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a7180dc168a4e83996284b58de0bdb1, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1734125439985 2024-12-13T21:30:41,398 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 62101a04052e41c581a4abbaf727ee9e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1734125439985 2024-12-13T21:30:41,406 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#A#compaction#84 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:41,406 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/350b63ecf4c142ec97d358ef2c1ea834 is 50, key is test_row_0/A:col10/1734125440095/Put/seqid=0 2024-12-13T21:30:41,407 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#B#compaction#85 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:41,408 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/b7c8e94033a54538833ff1b15f8db39e is 50, key is test_row_0/B:col10/1734125440095/Put/seqid=0 2024-12-13T21:30:41,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741924_1100 (size=13255) 2024-12-13T21:30:41,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741923_1099 (size=13255) 2024-12-13T21:30:41,427 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/350b63ecf4c142ec97d358ef2c1ea834 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/350b63ecf4c142ec97d358ef2c1ea834 2024-12-13T21:30:41,433 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/A of cc66b42faaed28a8693a712966f73789 into 350b63ecf4c142ec97d358ef2c1ea834(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:30:41,433 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:41,433 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/A, priority=13, startTime=1734125441395; duration=0sec 2024-12-13T21:30:41,433 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:41,433 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:A 2024-12-13T21:30:41,433 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:41,434 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:41,435 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/C is initiating minor compaction (all files) 2024-12-13T21:30:41,435 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/C in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:41,435 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/6635f12916ba44c7865b987d33e61133, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/eef54cf2a1564b4a8b9b8367953fef25, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1799590e6869488b98bea2727d1b0293] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=36.9 K 2024-12-13T21:30:41,435 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6635f12916ba44c7865b987d33e61133, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734125438136 2024-12-13T21:30:41,436 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting eef54cf2a1564b4a8b9b8367953fef25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1734125439334 2024-12-13T21:30:41,437 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1799590e6869488b98bea2727d1b0293, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1734125439985 2024-12-13T21:30:41,448 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
fd052dae32be,38989,1734125418878 2024-12-13T21:30:41,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-13T21:30:41,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:41,449 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-13T21:30:41,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:41,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:41,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:41,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:41,449 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#C#compaction#86 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:41,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:41,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:41,450 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/8981ffba90324a5aaf95ed81519f18b9 is 50, key is test_row_0/C:col10/1734125440095/Put/seqid=0 2024-12-13T21:30:41,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/098b176a5e374c459db4d6f4c5effb31 is 50, key is test_row_0/A:col10/1734125440155/Put/seqid=0 2024-12-13T21:30:41,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741925_1101 (size=13255) 2024-12-13T21:30:41,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741926_1102 (size=12301) 2024-12-13T21:30:41,826 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/b7c8e94033a54538833ff1b15f8db39e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/b7c8e94033a54538833ff1b15f8db39e 2024-12-13T21:30:41,833 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/B of cc66b42faaed28a8693a712966f73789 into b7c8e94033a54538833ff1b15f8db39e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:30:41,833 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:41,833 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/B, priority=13, startTime=1734125441395; duration=0sec 2024-12-13T21:30:41,834 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:41,834 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:B 2024-12-13T21:30:41,878 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/098b176a5e374c459db4d6f4c5effb31 2024-12-13T21:30:41,887 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/8981ffba90324a5aaf95ed81519f18b9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8981ffba90324a5aaf95ed81519f18b9 2024-12-13T21:30:41,895 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/C of cc66b42faaed28a8693a712966f73789 into 8981ffba90324a5aaf95ed81519f18b9(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:30:41,895 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:41,895 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/C, priority=13, startTime=1734125441396; duration=0sec 2024-12-13T21:30:41,895 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:41,895 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:C 2024-12-13T21:30:41,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/7864979fed3f48d0a1670f08837d26d8 is 50, key is test_row_0/B:col10/1734125440155/Put/seqid=0 2024-12-13T21:30:41,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741927_1103 (size=12301) 2024-12-13T21:30:41,910 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/7864979fed3f48d0a1670f08837d26d8 2024-12-13T21:30:41,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/ecbf1f3af9004031b10f759ed32a6601 is 50, key is test_row_0/C:col10/1734125440155/Put/seqid=0 2024-12-13T21:30:41,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741928_1104 (size=12301) 2024-12-13T21:30:42,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-13T21:30:42,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:42,284 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:42,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:42,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125502303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:42,332 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/ecbf1f3af9004031b10f759ed32a6601 2024-12-13T21:30:42,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/098b176a5e374c459db4d6f4c5effb31 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/098b176a5e374c459db4d6f4c5effb31 2024-12-13T21:30:42,342 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/098b176a5e374c459db4d6f4c5effb31, entries=150, sequenceid=404, filesize=12.0 K 2024-12-13T21:30:42,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/7864979fed3f48d0a1670f08837d26d8 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7864979fed3f48d0a1670f08837d26d8 2024-12-13T21:30:42,348 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7864979fed3f48d0a1670f08837d26d8, entries=150, sequenceid=404, filesize=12.0 K 2024-12-13T21:30:42,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/ecbf1f3af9004031b10f759ed32a6601 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ecbf1f3af9004031b10f759ed32a6601 2024-12-13T21:30:42,359 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ecbf1f3af9004031b10f759ed32a6601, entries=150, sequenceid=404, filesize=12.0 K 2024-12-13T21:30:42,360 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for cc66b42faaed28a8693a712966f73789 in 911ms, sequenceid=404, compaction requested=false 2024-12-13T21:30:42,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:42,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:42,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-13T21:30:42,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-13T21:30:42,362 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-13T21:30:42,362 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2990 sec 2024-12-13T21:30:42,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 2.3040 sec 2024-12-13T21:30:42,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:42,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-13T21:30:42,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:42,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:42,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:42,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:42,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:42,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:42,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/65e5b38949214608a89d0234f82bc844 is 50, key is test_row_0/A:col10/1734125442407/Put/seqid=0 2024-12-13T21:30:42,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741929_1105 (size=12301) 2024-12-13T21:30:42,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:42,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 303 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125502452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:42,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:42,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 305 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125502555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:42,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:42,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 307 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125502758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:42,842 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/65e5b38949214608a89d0234f82bc844 2024-12-13T21:30:42,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/20bd21644ed44cca878f43e7fafa4ea3 is 50, key is test_row_0/B:col10/1734125442407/Put/seqid=0 2024-12-13T21:30:42,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741930_1106 (size=12301) 2024-12-13T21:30:43,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:43,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 309 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125503061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:43,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/20bd21644ed44cca878f43e7fafa4ea3 2024-12-13T21:30:43,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/8780a0b09b5745da9938d6f5364279b2 is 50, key is test_row_0/C:col10/1734125442407/Put/seqid=0 2024-12-13T21:30:43,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741931_1107 (size=12301) 2024-12-13T21:30:43,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:43,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 311 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125503565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:43,667 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/8780a0b09b5745da9938d6f5364279b2 2024-12-13T21:30:43,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/65e5b38949214608a89d0234f82bc844 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/65e5b38949214608a89d0234f82bc844 2024-12-13T21:30:43,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/65e5b38949214608a89d0234f82bc844, entries=150, sequenceid=420, filesize=12.0 K 2024-12-13T21:30:43,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/20bd21644ed44cca878f43e7fafa4ea3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/20bd21644ed44cca878f43e7fafa4ea3 2024-12-13T21:30:43,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/20bd21644ed44cca878f43e7fafa4ea3, entries=150, sequenceid=420, filesize=12.0 K 2024-12-13T21:30:43,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/8780a0b09b5745da9938d6f5364279b2 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8780a0b09b5745da9938d6f5364279b2 2024-12-13T21:30:43,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8780a0b09b5745da9938d6f5364279b2, entries=150, sequenceid=420, filesize=12.0 K 2024-12-13T21:30:43,689 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for cc66b42faaed28a8693a712966f73789 in 1281ms, sequenceid=420, compaction requested=true 2024-12-13T21:30:43,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:43,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:30:43,689 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:43,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:43,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:30:43,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:43,690 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:43,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:30:43,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:43,691 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:43,691 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:43,691 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/B is initiating minor compaction (all files) 2024-12-13T21:30:43,691 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/A is initiating minor compaction (all files) 2024-12-13T21:30:43,691 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/B in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:43,691 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/A in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:43,691 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/350b63ecf4c142ec97d358ef2c1ea834, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/098b176a5e374c459db4d6f4c5effb31, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/65e5b38949214608a89d0234f82bc844] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=37.0 K 2024-12-13T21:30:43,691 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/b7c8e94033a54538833ff1b15f8db39e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7864979fed3f48d0a1670f08837d26d8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/20bd21644ed44cca878f43e7fafa4ea3] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=37.0 K 2024-12-13T21:30:43,691 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 350b63ecf4c142ec97d358ef2c1ea834, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1734125439985 2024-12-13T21:30:43,691 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting b7c8e94033a54538833ff1b15f8db39e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1734125439985 2024-12-13T21:30:43,692 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 7864979fed3f48d0a1670f08837d26d8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1734125440145 2024-12-13T21:30:43,692 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 098b176a5e374c459db4d6f4c5effb31, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1734125440145 2024-12-13T21:30:43,692 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 20bd21644ed44cca878f43e7fafa4ea3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1734125442300 2024-12-13T21:30:43,692 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 65e5b38949214608a89d0234f82bc844, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1734125442300 
2024-12-13T21:30:43,700 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#A#compaction#93 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:43,700 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/61a27290bb22410593a46d524cd6add7 is 50, key is test_row_0/A:col10/1734125442407/Put/seqid=0 2024-12-13T21:30:43,704 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#B#compaction#94 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:43,705 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/c2472958e01f4cb982ed574dbc064128 is 50, key is test_row_0/B:col10/1734125442407/Put/seqid=0 2024-12-13T21:30:43,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741932_1108 (size=13357) 2024-12-13T21:30:43,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741933_1109 (size=13357) 2024-12-13T21:30:43,720 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/61a27290bb22410593a46d524cd6add7 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/61a27290bb22410593a46d524cd6add7 2024-12-13T21:30:43,725 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/A of cc66b42faaed28a8693a712966f73789 into 61a27290bb22410593a46d524cd6add7(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:30:43,725 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:43,725 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/A, priority=13, startTime=1734125443689; duration=0sec 2024-12-13T21:30:43,725 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:43,725 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:A 2024-12-13T21:30:43,725 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:43,726 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:43,727 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/C is initiating minor compaction (all files) 2024-12-13T21:30:43,727 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/C in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:43,727 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8981ffba90324a5aaf95ed81519f18b9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ecbf1f3af9004031b10f759ed32a6601, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8780a0b09b5745da9938d6f5364279b2] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=37.0 K 2024-12-13T21:30:43,728 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8981ffba90324a5aaf95ed81519f18b9, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1734125439985 2024-12-13T21:30:43,729 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting ecbf1f3af9004031b10f759ed32a6601, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1734125440145 2024-12-13T21:30:43,729 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8780a0b09b5745da9938d6f5364279b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1734125442300 2024-12-13T21:30:43,736 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#C#compaction#95 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:43,737 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/46d6b380c321406cb455aa88ea82ae8b is 50, key is test_row_0/C:col10/1734125442407/Put/seqid=0 2024-12-13T21:30:43,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741934_1110 (size=13357) 2024-12-13T21:30:43,746 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/46d6b380c321406cb455aa88ea82ae8b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/46d6b380c321406cb455aa88ea82ae8b 2024-12-13T21:30:43,751 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/C of cc66b42faaed28a8693a712966f73789 into 46d6b380c321406cb455aa88ea82ae8b(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:43,752 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:43,752 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/C, priority=13, startTime=1734125443690; duration=0sec 2024-12-13T21:30:43,752 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:43,752 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:C 2024-12-13T21:30:44,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-13T21:30:44,166 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-13T21:30:44,167 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:30:44,168 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/c2472958e01f4cb982ed574dbc064128 as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c2472958e01f4cb982ed574dbc064128 2024-12-13T21:30:44,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-13T21:30:44,168 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:30:44,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-13T21:30:44,169 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:30:44,169 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:30:44,176 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/B of cc66b42faaed28a8693a712966f73789 into c2472958e01f4cb982ed574dbc064128(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:44,176 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:44,176 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/B, priority=13, startTime=1734125443690; duration=0sec 2024-12-13T21:30:44,176 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:44,177 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:B 2024-12-13T21:30:44,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-13T21:30:44,322 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:44,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-13T21:30:44,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
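The entries above show the test client (jenkins//172.17.0.3) asking the master to flush TestAcidGuarantees, which the master turns into FlushTableProcedure pid=28 with a FlushRegionProcedure subprocedure (pid=29). As a minimal, illustrative sketch only: this is roughly how such a flush can be issued from client code with the public Admin API, assuming an hbase-client dependency and a reachable cluster; connection details are assumptions, not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Reads hbase-site.xml from the classpath; quorum/port are assumptions, not from this log.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Waits for the master-side flush procedure (the pid=28/pid=29 pair above) to complete,
      // which is why the log shows the client repeatedly "Checking to see if procedure is done".
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}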
2024-12-13T21:30:44,323 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-13T21:30:44,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:44,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:44,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:44,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:44,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:44,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:44,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/b570db8db7d8449f8f87fca0b9a4adec is 50, key is test_row_0/A:col10/1734125442449/Put/seqid=0 2024-12-13T21:30:44,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741935_1111 (size=12301) 2024-12-13T21:30:44,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-13T21:30:44,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:44,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:44,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:44,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 324 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125504590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:44,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:44,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 326 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125504693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:44,732 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/b570db8db7d8449f8f87fca0b9a4adec 2024-12-13T21:30:44,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/8d447df0e6694764be7454af9a499bb3 is 50, key is test_row_0/B:col10/1734125442449/Put/seqid=0 2024-12-13T21:30:44,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741936_1112 (size=12301) 2024-12-13T21:30:44,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-13T21:30:44,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:44,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 328 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125504895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:45,151 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/8d447df0e6694764be7454af9a499bb3 2024-12-13T21:30:45,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/a139a775b08e4e1fbeb270d550d35a84 is 50, key is test_row_0/C:col10/1734125442449/Put/seqid=0 2024-12-13T21:30:45,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741937_1113 (size=12301) 2024-12-13T21:30:45,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:45,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 330 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125505197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:45,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-13T21:30:45,567 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/a139a775b08e4e1fbeb270d550d35a84 2024-12-13T21:30:45,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/b570db8db7d8449f8f87fca0b9a4adec as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/b570db8db7d8449f8f87fca0b9a4adec 2024-12-13T21:30:45,577 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/b570db8db7d8449f8f87fca0b9a4adec, entries=150, sequenceid=445, filesize=12.0 K 2024-12-13T21:30:45,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-12-13T21:30:45,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/8d447df0e6694764be7454af9a499bb3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/8d447df0e6694764be7454af9a499bb3 2024-12-13T21:30:45,584 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/8d447df0e6694764be7454af9a499bb3, entries=150, sequenceid=445, filesize=12.0 K 2024-12-13T21:30:45,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/a139a775b08e4e1fbeb270d550d35a84 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/a139a775b08e4e1fbeb270d550d35a84 2024-12-13T21:30:45,592 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/a139a775b08e4e1fbeb270d550d35a84, entries=150, sequenceid=445, filesize=12.0 K 2024-12-13T21:30:45,593 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for cc66b42faaed28a8693a712966f73789 in 1270ms, sequenceid=445, compaction requested=false 2024-12-13T21:30:45,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:45,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
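The repeated RegionTooBusyException warnings above are the region server rejecting puts while the memstore is over its blocking limit and the flush is still in flight. The stock HBase client retries this for the caller (and, depending on settings, may surface it wrapped in a retries-exhausted exception rather than directly), so the sketch below is only to make the failure mode concrete: an explicit backoff-and-retry around a single Put. Row, family and qualifier match what is visible in the log; the value and the timings are arbitrary assumptions.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BusyRegionRetry {

  // Retry a single Put with exponential backoff when the region reports it is too busy.
  static void putWithBackoff(Table table, Put put) throws Exception {
    long backoffMs = 100;                  // arbitrary starting delay
    for (int attempt = 1; attempt <= 5; attempt++) {
      try {
        table.put(put);                    // rejected while the memstore is over its blocking limit
        return;
      } catch (RegionTooBusyException e) {
        Thread.sleep(backoffMs);           // give the in-flight flush time to drain the memstore
        backoffMs *= 2;
      }
    }
    throw new java.io.IOException("region still too busy after 5 attempts");
  }

  // The row/family/qualifier seen in the log; the value is a placeholder.
  static Put examplePut() {
    return new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
  }
}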
2024-12-13T21:30:45,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-13T21:30:45,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-12-13T21:30:45,596 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-13T21:30:45,596 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4260 sec 2024-12-13T21:30:45,598 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.4300 sec 2024-12-13T21:30:45,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:45,700 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-13T21:30:45,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:45,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:45,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:45,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:45,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:45,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:45,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/cacc8c51e02a4b3aa51055ae8edffe7b is 50, key is test_row_0/A:col10/1734125445699/Put/seqid=0 2024-12-13T21:30:45,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741938_1114 (size=12301) 2024-12-13T21:30:45,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 351 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125505738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:45,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:45,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 353 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125505840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:46,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:46,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 355 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125506042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:46,112 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/cacc8c51e02a4b3aa51055ae8edffe7b 2024-12-13T21:30:46,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/32c0c426540341e7987f5cef0a6dd647 is 50, key is test_row_0/B:col10/1734125445699/Put/seqid=0 2024-12-13T21:30:46,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741939_1115 (size=12301) 2024-12-13T21:30:46,125 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/32c0c426540341e7987f5cef0a6dd647 2024-12-13T21:30:46,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/1c1b00b6f03647e5a7554f6e57a8f615 is 50, key is test_row_0/C:col10/1734125445699/Put/seqid=0 2024-12-13T21:30:46,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741940_1116 (size=12301) 2024-12-13T21:30:46,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-13T21:30:46,273 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-13T21:30:46,274 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 
{}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:30:46,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-12-13T21:30:46,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T21:30:46,275 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:30:46,276 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:30:46,276 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:30:46,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:46,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 357 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125506345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T21:30:46,427 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:46,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-13T21:30:46,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:46,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:46,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:46,428 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
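The "Over memstore limit=512.0 K" figure in the rejections above is the per-region blocking threshold: memstore flush size multiplied by the block multiplier. A sketch of the knobs involved, with illustrative values only; this test harness clearly runs with a much smaller flush size than the production default, and the exact values it sets are not visible in this excerpt (128 KB x 4 is simply one combination that would yield the 512 K reported here).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstorePressureConfig {
  public static Configuration example() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a memstore once it reaches this many bytes (128 KB here, for illustration only).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes once the memstore grows to flush.size * multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Writers are also blocked when a store accumulates this many HFiles,
    // which is the "16 blocking" figure in the compaction-selection entries above.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}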
2024-12-13T21:30:46,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:46,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:46,541 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/1c1b00b6f03647e5a7554f6e57a8f615 2024-12-13T21:30:46,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/cacc8c51e02a4b3aa51055ae8edffe7b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/cacc8c51e02a4b3aa51055ae8edffe7b 2024-12-13T21:30:46,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/cacc8c51e02a4b3aa51055ae8edffe7b, entries=150, sequenceid=460, filesize=12.0 K 2024-12-13T21:30:46,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/32c0c426540341e7987f5cef0a6dd647 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/32c0c426540341e7987f5cef0a6dd647 2024-12-13T21:30:46,560 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/32c0c426540341e7987f5cef0a6dd647, entries=150, sequenceid=460, filesize=12.0 K 2024-12-13T21:30:46,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/1c1b00b6f03647e5a7554f6e57a8f615 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1c1b00b6f03647e5a7554f6e57a8f615 2024-12-13T21:30:46,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1c1b00b6f03647e5a7554f6e57a8f615, entries=150, sequenceid=460, filesize=12.0 K 2024-12-13T21:30:46,568 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for cc66b42faaed28a8693a712966f73789 in 867ms, sequenceid=460, compaction requested=true 2024-12-13T21:30:46,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:46,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
cc66b42faaed28a8693a712966f73789:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:30:46,568 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:46,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:46,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:30:46,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:46,568 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:46,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:30:46,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:46,569 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:46,569 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:46,569 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/B is initiating minor compaction (all files) 2024-12-13T21:30:46,569 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/A is initiating minor compaction (all files) 2024-12-13T21:30:46,569 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/B in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:46,569 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/A in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
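The entries above show MemStoreFlusher queueing system compactions for stores A, B and C as soon as the flush finishes, with ExploringCompactionPolicy picking all three new files for a minor compaction. Compactions can also be requested explicitly through the Admin API; a minimal sketch, assuming an already open Connection to the same cluster.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;

public class CompactionRequestExample {

  // Ask for a compaction of the test table and poll until the server reports none running.
  static void compactAndWait(Connection connection) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Admin admin = connection.getAdmin()) {
      admin.compact(table);   // enqueues a minor-compaction request, much like CompactSplit does above
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(200);    // arbitrary polling interval
      }
    }
  }
}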
2024-12-13T21:30:46,569 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/61a27290bb22410593a46d524cd6add7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/b570db8db7d8449f8f87fca0b9a4adec, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/cacc8c51e02a4b3aa51055ae8edffe7b] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=37.1 K 2024-12-13T21:30:46,569 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c2472958e01f4cb982ed574dbc064128, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/8d447df0e6694764be7454af9a499bb3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/32c0c426540341e7987f5cef0a6dd647] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=37.1 K 2024-12-13T21:30:46,570 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting c2472958e01f4cb982ed574dbc064128, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1734125442300 2024-12-13T21:30:46,570 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61a27290bb22410593a46d524cd6add7, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1734125442300 2024-12-13T21:30:46,570 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d447df0e6694764be7454af9a499bb3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1734125442442 2024-12-13T21:30:46,570 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting b570db8db7d8449f8f87fca0b9a4adec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1734125442442 2024-12-13T21:30:46,570 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 32c0c426540341e7987f5cef0a6dd647, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1734125444588 2024-12-13T21:30:46,570 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting cacc8c51e02a4b3aa51055ae8edffe7b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1734125444588 2024-12-13T21:30:46,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T21:30:46,578 INFO 
[RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#B#compaction#102 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:46,578 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#A#compaction#103 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:46,579 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/881a92e180ab46778ff98844bcb4ebe2 is 50, key is test_row_0/B:col10/1734125445699/Put/seqid=0 2024-12-13T21:30:46,579 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/adb4978aaf6d4fc3a18575a6a95f4c7e is 50, key is test_row_0/A:col10/1734125445699/Put/seqid=0 2024-12-13T21:30:46,580 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:46,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-13T21:30:46,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
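The throttling lines above come from PressureAwareThroughputController, which caps aggregate compaction I/O on the region server (reported here as a 50.00 MB/second limit, with the two concurrent compactions well under it). A sketch of the related settings; the property names follow the documentation for the pressure-aware compaction throughput controller and should be verified against the HBase version actually in use, and the values are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputConfig {
  public static Configuration example() {
    Configuration conf = HBaseConfiguration.create();
    // Upper and lower bounds between which the controller scales the compaction
    // throughput limit according to store-file pressure (values here are illustrative).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 25L * 1024 * 1024);
    return conf;
  }
}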
2024-12-13T21:30:46,581 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-13T21:30:46,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:46,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:46,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:46,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:46,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:46,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:46,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741941_1117 (size=13459) 2024-12-13T21:30:46,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/62d8be8a59624a9ab83eedc2c95b8fc2 is 50, key is test_row_0/A:col10/1734125445726/Put/seqid=0 2024-12-13T21:30:46,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741942_1118 (size=13459) 2024-12-13T21:30:46,593 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/881a92e180ab46778ff98844bcb4ebe2 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/881a92e180ab46778ff98844bcb4ebe2 2024-12-13T21:30:46,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741943_1119 (size=12301) 2024-12-13T21:30:46,593 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/adb4978aaf6d4fc3a18575a6a95f4c7e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/adb4978aaf6d4fc3a18575a6a95f4c7e 2024-12-13T21:30:46,594 INFO 
[RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/62d8be8a59624a9ab83eedc2c95b8fc2 2024-12-13T21:30:46,602 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/A of cc66b42faaed28a8693a712966f73789 into adb4978aaf6d4fc3a18575a6a95f4c7e(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:46,602 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:46,602 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/B of cc66b42faaed28a8693a712966f73789 into 881a92e180ab46778ff98844bcb4ebe2(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:30:46,602 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:46,602 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/A, priority=13, startTime=1734125446568; duration=0sec 2024-12-13T21:30:46,602 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/B, priority=13, startTime=1734125446568; duration=0sec 2024-12-13T21:30:46,602 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:46,602 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:A 2024-12-13T21:30:46,602 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:46,603 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:B 2024-12-13T21:30:46,603 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:46,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/0e466b0a91c14835aa0feca8e6b9daae is 50, key is test_row_0/B:col10/1734125445726/Put/seqid=0 2024-12-13T21:30:46,606 
DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:46,606 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/C is initiating minor compaction (all files) 2024-12-13T21:30:46,606 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/C in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:46,606 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/46d6b380c321406cb455aa88ea82ae8b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/a139a775b08e4e1fbeb270d550d35a84, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1c1b00b6f03647e5a7554f6e57a8f615] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=37.1 K 2024-12-13T21:30:46,607 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46d6b380c321406cb455aa88ea82ae8b, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1734125442300 2024-12-13T21:30:46,607 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting a139a775b08e4e1fbeb270d550d35a84, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1734125442442 2024-12-13T21:30:46,608 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c1b00b6f03647e5a7554f6e57a8f615, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1734125444588 2024-12-13T21:30:46,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741944_1120 (size=12301) 2024-12-13T21:30:46,611 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/0e466b0a91c14835aa0feca8e6b9daae 2024-12-13T21:30:46,625 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#C#compaction#106 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:46,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/b7b5097b1e8a45929a25e10a5a6e5508 is 50, key is test_row_0/C:col10/1734125445726/Put/seqid=0 2024-12-13T21:30:46,626 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/c5fbcbc351774df2b17f33630889b40c is 50, key is test_row_0/C:col10/1734125445699/Put/seqid=0 2024-12-13T21:30:46,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741945_1121 (size=12301) 2024-12-13T21:30:46,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741946_1122 (size=13459) 2024-12-13T21:30:46,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:46,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:46,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:46,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 371 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125506872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:46,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T21:30:46,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:46,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 373 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125506974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:47,032 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/b7b5097b1e8a45929a25e10a5a6e5508 2024-12-13T21:30:47,038 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/c5fbcbc351774df2b17f33630889b40c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/c5fbcbc351774df2b17f33630889b40c 2024-12-13T21:30:47,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/62d8be8a59624a9ab83eedc2c95b8fc2 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/62d8be8a59624a9ab83eedc2c95b8fc2 2024-12-13T21:30:47,044 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/62d8be8a59624a9ab83eedc2c95b8fc2, entries=150, sequenceid=481, filesize=12.0 K 2024-12-13T21:30:47,045 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/C of cc66b42faaed28a8693a712966f73789 into c5fbcbc351774df2b17f33630889b40c(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
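The compaction entries above (SortedCompactionPolicy selecting "from 3 store files, 0 compacting, 3 eligible, 16 blocking", ExploringCompactionPolicy picking 3 files, PressureAwareThroughputController throttling the rewrite) are all driven by per-store compaction settings. As a minimal sketch, and assuming the stock HBase 2.x key names rather than anything this test actually set, those thresholds would be tuned like this; the values are placeholders:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch only: stock HBase 2.x keys behind the
// "3 eligible, 16 blocking" selection message seen in the log.
// Values are placeholders, not what this test run configured.
public class CompactionTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // files needed before a minor compaction is considered
    conf.setInt("hbase.hstore.compaction.max", 10);       // cap on files per compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" threshold in the selection message
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ExploringCompactionPolicy size ratio
    return conf;
  }
}
```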
2024-12-13T21:30:47,045 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:47,045 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/C, priority=13, startTime=1734125446568; duration=0sec 2024-12-13T21:30:47,045 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:47,045 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:C 2024-12-13T21:30:47,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/0e466b0a91c14835aa0feca8e6b9daae as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/0e466b0a91c14835aa0feca8e6b9daae 2024-12-13T21:30:47,052 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/0e466b0a91c14835aa0feca8e6b9daae, entries=150, sequenceid=481, filesize=12.0 K 2024-12-13T21:30:47,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/b7b5097b1e8a45929a25e10a5a6e5508 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/b7b5097b1e8a45929a25e10a5a6e5508 2024-12-13T21:30:47,060 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/b7b5097b1e8a45929a25e10a5a6e5508, entries=150, sequenceid=481, filesize=12.0 K 2024-12-13T21:30:47,061 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for cc66b42faaed28a8693a712966f73789 in 481ms, sequenceid=481, compaction requested=false 2024-12-13T21:30:47,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:47,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:47,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-12-13T21:30:47,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-12-13T21:30:47,064 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-13T21:30:47,064 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 786 msec 2024-12-13T21:30:47,065 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 790 msec 2024-12-13T21:30:47,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:47,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-13T21:30:47,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:47,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:47,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:47,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:47,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:47,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:47,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/a1bcb5ec8a714554b3ced9d51bd3e4b6 is 50, key is test_row_0/A:col10/1734125447177/Put/seqid=0 2024-12-13T21:30:47,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741947_1123 (size=14741) 2024-12-13T21:30:47,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:47,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 393 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125507220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:47,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:47,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 395 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125507323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:47,353 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
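The repeated `RegionTooBusyException: Over memstore limit=512.0 K` warnings come from `HRegion.checkResources`, which blocks writes once a region's memstore exceeds its blocking size. In stock HBase that limit is the per-region flush size multiplied by a block multiplier; the 512 K seen here is presumably a deliberately small test setting, since the defaults are far larger. A minimal sketch, assuming the standard key names and defaults:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the two settings whose product is the blocking memstore size that
// triggers RegionTooBusyException ("Over memstore limit=...") when writes outrun flushes.
// Defaults shown as fallbacks; the 512 K limit in this log implies a tiny test override.
public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize  = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
  }
}
```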
2024-12-13T21:30:47,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T21:30:47,378 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-13T21:30:47,379 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:30:47,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-12-13T21:30:47,381 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:30:47,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-13T21:30:47,381 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:30:47,381 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:30:47,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-13T21:30:47,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:47,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 397 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125507526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:47,534 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:47,534 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-13T21:30:47,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:47,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:47,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:47,534 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
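The entries around here show the other side of a client-requested flush: Thread-159 reports "Operation: FLUSH ... procId: 30 completed", the master immediately stores a new FlushTableProcedure (pid=32) with a FlushRegionProcedure subprocedure (pid=33), and the region server rejects the callable because the region is already flushing. A minimal sketch of the client call that starts this chain, assuming a standard HBase 2.x client setup rather than the test's actual harness code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch: a synchronous table flush request, which on the master side becomes
// a FlushTableProcedure with a FlushRegionProcedure subprocedure per region,
// matching the pid=32 / pid=33 entries in the surrounding log.
public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees")); // blocks until the procedure finishes
    }
  }
}
```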
2024-12-13T21:30:47,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:47,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:47,564 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x50c9c1d1 to 127.0.0.1:57927 2024-12-13T21:30:47,564 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f0c7188 to 127.0.0.1:57927 2024-12-13T21:30:47,564 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:30:47,564 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:30:47,565 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x475ca0f4 to 127.0.0.1:57927 2024-12-13T21:30:47,565 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:30:47,566 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4f1331a9 to 127.0.0.1:57927 2024-12-13T21:30:47,566 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:30:47,589 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/a1bcb5ec8a714554b3ced9d51bd3e4b6 2024-12-13T21:30:47,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/cc612ef4819144dc9db02518003d9ad8 is 50, key is test_row_0/B:col10/1734125447177/Put/seqid=0 2024-12-13T21:30:47,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741948_1124 (size=12301) 2024-12-13T21:30:47,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-13T21:30:47,686 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:47,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-13T21:30:47,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:47,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:47,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
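The RegionTooBusyException warnings in this stretch are thrown back to the writer; the stock HBase client already treats them as retriable and backs off on its own, so nothing special is required of callers. Purely as a rough illustration of what that backoff amounts to, a hand-rolled retry around a single put could look like the sketch below; the row, family, and qualifier names are taken from the test data in the log, and the helper itself is hypothetical, not the test's code:

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Rough illustration only: the standard client already retries RegionTooBusyException
// with backoff; this just makes the backoff explicit for a single Put.
public class BusyRegionRetrySketch {
  static void putWithBackoff(Table table, byte[] row, byte[] value) throws Exception {
    Put put = new Put(row).addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
    long pauseMs = 100;
    for (int attempt = 0; attempt < 10; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        Thread.sleep(pauseMs);               // memstore over its blocking limit; wait for a flush
        pauseMs = Math.min(pauseMs * 2, 5_000);
      }
    }
    throw new java.io.IOException("region still too busy after retries");
  }
}
```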
2024-12-13T21:30:47,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:47,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:47,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:47,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:47,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 399 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125507830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:47,842 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:47,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-13T21:30:47,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:47,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:47,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:47,843 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:30:47,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:47,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:47,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-13T21:30:47,995 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:47,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-13T21:30:47,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:47,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:47,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:47,997 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:47,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:47,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
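As the flush and compaction entries show, each new HFile is first written under the region's `.tmp` staging directory and only then committed into the column-family directory (the "Committing ... .tmp/C/... as .../C/..." lines). A minimal sketch, assuming plain Hadoop FileSystem access to the same test-data path printed in the log, of listing what ends up in one family directory after such a commit:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: lists the committed HFiles of the C family of the test region.
// The path mirrors the hdfs://localhost:34065/... layout seen in the log.
public class ListStoreFilesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path familyDir = new Path(
        "hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05"
        + "/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C");
    try (FileSystem fs = familyDir.getFileSystem(conf)) {
      for (FileStatus f : fs.listStatus(familyDir)) {
        System.out.println(f.getPath().getName() + " " + f.getLen() + " bytes");
      }
    }
  }
}
```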
2024-12-13T21:30:48,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/cc612ef4819144dc9db02518003d9ad8 2024-12-13T21:30:48,014 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/4070514a410f40748caae50a9b04e9bf is 50, key is test_row_0/C:col10/1734125447177/Put/seqid=0 2024-12-13T21:30:48,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741949_1125 (size=12301) 2024-12-13T21:30:48,151 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:48,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-13T21:30:48,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:48,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:48,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:48,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
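Every flushed and compacted store file in this run is reported with `bloomFilter=true` / `bloomtype=ROW` for the A, B, and C families. As a minimal sketch, assuming the HBase 2.x descriptor builders and not the test's actual table-creation code, such families would be declared like this:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: declares the three families seen in the log (A, B, C) with ROW bloom
// filters, matching the "bloomtype=ROW" noted for every flushed/compacted store file.
public class BloomFamilySketch {
  static TableDescriptor descriptor() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setBloomFilterType(BloomType.ROW)
              .build());
    }
    return builder.build();
  }
}
```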
2024-12-13T21:30:48,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:48,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:48,308 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:48,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-13T21:30:48,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:48,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. as already flushing 2024-12-13T21:30:48,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:48,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:48,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:48,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:48,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:48,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 401 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34524 deadline: 1734125508335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:48,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/4070514a410f40748caae50a9b04e9bf 2024-12-13T21:30:48,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/a1bcb5ec8a714554b3ced9d51bd3e4b6 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/a1bcb5ec8a714554b3ced9d51bd3e4b6 2024-12-13T21:30:48,437 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/a1bcb5ec8a714554b3ced9d51bd3e4b6, entries=200, sequenceid=500, filesize=14.4 K 2024-12-13T21:30:48,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/cc612ef4819144dc9db02518003d9ad8 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/cc612ef4819144dc9db02518003d9ad8 2024-12-13T21:30:48,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/cc612ef4819144dc9db02518003d9ad8, entries=150, sequenceid=500, filesize=12.0 K 2024-12-13T21:30:48,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/4070514a410f40748caae50a9b04e9bf as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4070514a410f40748caae50a9b04e9bf 2024-12-13T21:30:48,449 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4070514a410f40748caae50a9b04e9bf, entries=150, sequenceid=500, filesize=12.0 K 2024-12-13T21:30:48,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for cc66b42faaed28a8693a712966f73789 in 1272ms, sequenceid=500, compaction requested=true 2024-12-13T21:30:48,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:48,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:30:48,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:48,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:30:48,450 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:48,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:48,450 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:48,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc66b42faaed28a8693a712966f73789:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:30:48,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:48,451 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:48,451 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:48,451 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/B is initiating minor compaction (all files) 2024-12-13T21:30:48,451 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/A is initiating minor compaction (all files) 2024-12-13T21:30:48,451 INFO 
[RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/B in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:48,451 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/A in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:48,451 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/881a92e180ab46778ff98844bcb4ebe2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/0e466b0a91c14835aa0feca8e6b9daae, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/cc612ef4819144dc9db02518003d9ad8] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=37.2 K 2024-12-13T21:30:48,451 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/adb4978aaf6d4fc3a18575a6a95f4c7e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/62d8be8a59624a9ab83eedc2c95b8fc2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/a1bcb5ec8a714554b3ced9d51bd3e4b6] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=39.6 K 2024-12-13T21:30:48,452 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 881a92e180ab46778ff98844bcb4ebe2, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1734125444588 2024-12-13T21:30:48,452 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting adb4978aaf6d4fc3a18575a6a95f4c7e, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1734125444588 2024-12-13T21:30:48,452 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e466b0a91c14835aa0feca8e6b9daae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1734125445726 2024-12-13T21:30:48,452 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62d8be8a59624a9ab83eedc2c95b8fc2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1734125445726 2024-12-13T21:30:48,452 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting cc612ef4819144dc9db02518003d9ad8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1734125446857 2024-12-13T21:30:48,452 DEBUG 
[RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1bcb5ec8a714554b3ced9d51bd3e4b6, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1734125446857 2024-12-13T21:30:48,459 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#A#compaction#111 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:48,460 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/fb8c5eecc76a4d709c28a19681295ee6 is 50, key is test_row_0/A:col10/1734125447177/Put/seqid=0 2024-12-13T21:30:48,460 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc66b42faaed28a8693a712966f73789#B#compaction#112 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:48,461 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/6fd4400b7d974c0092a5b171b7249d5d is 50, key is test_row_0/B:col10/1734125447177/Put/seqid=0 2024-12-13T21:30:48,461 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:48,462 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-13T21:30:48,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:48,462 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-13T21:30:48,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:48,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:48,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:48,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:48,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:48,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:48,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741950_1126 (size=13561) 2024-12-13T21:30:48,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741951_1127 (size=13561) 2024-12-13T21:30:48,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/2f5c524f66754d3a88026e5646725d5b is 50, key is test_row_0/A:col10/1734125447215/Put/seqid=0 2024-12-13T21:30:48,470 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/6fd4400b7d974c0092a5b171b7249d5d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/6fd4400b7d974c0092a5b171b7249d5d 2024-12-13T21:30:48,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741952_1128 (size=12301) 2024-12-13T21:30:48,475 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/B of cc66b42faaed28a8693a712966f73789 into 6fd4400b7d974c0092a5b171b7249d5d(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:30:48,475 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:48,475 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/B, priority=13, startTime=1734125448450; duration=0sec 2024-12-13T21:30:48,475 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:48,475 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:B 2024-12-13T21:30:48,475 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:48,476 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:48,476 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): cc66b42faaed28a8693a712966f73789/C is initiating minor compaction (all files) 2024-12-13T21:30:48,476 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc66b42faaed28a8693a712966f73789/C in TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:48,477 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/c5fbcbc351774df2b17f33630889b40c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/b7b5097b1e8a45929a25e10a5a6e5508, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4070514a410f40748caae50a9b04e9bf] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp, totalSize=37.2 K 2024-12-13T21:30:48,477 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting c5fbcbc351774df2b17f33630889b40c, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1734125444588 2024-12-13T21:30:48,477 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting b7b5097b1e8a45929a25e10a5a6e5508, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1734125445726 2024-12-13T21:30:48,477 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 4070514a410f40748caae50a9b04e9bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1734125446857 2024-12-13T21:30:48,484 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
cc66b42faaed28a8693a712966f73789#C#compaction#114 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:48,485 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/bc2c63f2a91a4ae0974b2a4fd692690c is 50, key is test_row_0/C:col10/1734125447177/Put/seqid=0 2024-12-13T21:30:48,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-13T21:30:48,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741953_1129 (size=13561) 2024-12-13T21:30:48,872 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=520 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/2f5c524f66754d3a88026e5646725d5b 2024-12-13T21:30:48,877 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/fb8c5eecc76a4d709c28a19681295ee6 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fb8c5eecc76a4d709c28a19681295ee6 2024-12-13T21:30:48,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/16d66c4f7db148da8365eecb1b446d44 is 50, key is test_row_0/B:col10/1734125447215/Put/seqid=0 2024-12-13T21:30:48,883 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/A of cc66b42faaed28a8693a712966f73789 into fb8c5eecc76a4d709c28a19681295ee6(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:30:48,883 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:48,883 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/A, priority=13, startTime=1734125448450; duration=0sec 2024-12-13T21:30:48,883 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:48,883 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:A 2024-12-13T21:30:48,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741954_1130 (size=12301) 2024-12-13T21:30:48,893 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/bc2c63f2a91a4ae0974b2a4fd692690c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/bc2c63f2a91a4ae0974b2a4fd692690c 2024-12-13T21:30:48,898 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc66b42faaed28a8693a712966f73789/C of cc66b42faaed28a8693a712966f73789 into bc2c63f2a91a4ae0974b2a4fd692690c(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:30:48,898 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:48,898 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789., storeName=cc66b42faaed28a8693a712966f73789/C, priority=13, startTime=1734125448450; duration=0sec 2024-12-13T21:30:48,898 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:48,898 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc66b42faaed28a8693a712966f73789:C 2024-12-13T21:30:49,288 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=520 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/16d66c4f7db148da8365eecb1b446d44 2024-12-13T21:30:49,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/f3284cec4ef24c47b1bf8854b3871c09 is 50, key is test_row_0/C:col10/1734125447215/Put/seqid=0 2024-12-13T21:30:49,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741955_1131 (size=12301) 2024-12-13T21:30:49,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:49,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
as already flushing 2024-12-13T21:30:49,338 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x39b10898 to 127.0.0.1:57927 2024-12-13T21:30:49,338 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:30:49,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-13T21:30:49,708 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=520 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/f3284cec4ef24c47b1bf8854b3871c09 2024-12-13T21:30:49,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/2f5c524f66754d3a88026e5646725d5b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/2f5c524f66754d3a88026e5646725d5b 2024-12-13T21:30:49,721 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/2f5c524f66754d3a88026e5646725d5b, entries=150, sequenceid=520, filesize=12.0 K 2024-12-13T21:30:49,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/16d66c4f7db148da8365eecb1b446d44 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/16d66c4f7db148da8365eecb1b446d44 2024-12-13T21:30:49,726 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/16d66c4f7db148da8365eecb1b446d44, entries=150, sequenceid=520, filesize=12.0 K 2024-12-13T21:30:49,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/f3284cec4ef24c47b1bf8854b3871c09 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/f3284cec4ef24c47b1bf8854b3871c09 2024-12-13T21:30:49,730 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/f3284cec4ef24c47b1bf8854b3871c09, entries=150, sequenceid=520, filesize=12.0 K 2024-12-13T21:30:49,731 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=6.71 KB/6870 for cc66b42faaed28a8693a712966f73789 in 1269ms, sequenceid=520, compaction requested=false 2024-12-13T21:30:49,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:49,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:49,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-12-13T21:30:49,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-12-13T21:30:49,734 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-13T21:30:49,734 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3520 sec 2024-12-13T21:30:49,736 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 2.3560 sec 2024-12-13T21:30:49,847 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d7115de to 127.0.0.1:57927 2024-12-13T21:30:49,847 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:30:49,879 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x30d4d4c6 to 127.0.0.1:57927 2024-12-13T21:30:49,879 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:30:49,881 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x054c943d to 127.0.0.1:57927 2024-12-13T21:30:49,882 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:30:49,884 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2cac4303 to 127.0.0.1:57927 2024-12-13T21:30:49,884 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:30:51,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-13T21:30:51,490 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-12-13T21:30:51,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-13T21:30:51,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-12-13T21:30:51,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 279 2024-12-13T21:30:51,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 34 2024-12-13T21:30:51,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 33 2024-12-13T21:30:51,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-12-13T21:30:51,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-13T21:30:51,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8599 2024-12-13T21:30:51,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8306 2024-12-13T21:30:51,491 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-13T21:30:51,492 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3655 2024-12-13T21:30:51,492 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10961 rows 2024-12-13T21:30:51,492 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3629 2024-12-13T21:30:51,492 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10887 rows 2024-12-13T21:30:51,492 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-13T21:30:51,492 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e59596a to 127.0.0.1:57927 2024-12-13T21:30:51,492 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:30:51,499 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-13T21:30:51,502 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-12-13T21:30:51,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-13T21:30:51,508 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125451508"}]},"ts":"1734125451508"} 2024-12-13T21:30:51,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-13T21:30:51,509 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-13T21:30:51,540 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-13T21:30:51,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-13T21:30:51,550 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc66b42faaed28a8693a712966f73789, UNASSIGN}] 2024-12-13T21:30:51,551 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=cc66b42faaed28a8693a712966f73789, UNASSIGN 2024-12-13T21:30:51,552 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=cc66b42faaed28a8693a712966f73789, regionState=CLOSING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:30:51,553 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T21:30:51,553 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:30:51,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-13T21:30:51,708 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:51,710 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:51,710 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T21:30:51,711 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing cc66b42faaed28a8693a712966f73789, disabling compactions & flushes 2024-12-13T21:30:51,711 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:51,711 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 2024-12-13T21:30:51,711 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. after waiting 0 ms 2024-12-13T21:30:51,711 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
2024-12-13T21:30:51,711 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(2837): Flushing cc66b42faaed28a8693a712966f73789 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-13T21:30:51,711 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=A 2024-12-13T21:30:51,711 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:51,712 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=B 2024-12-13T21:30:51,712 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:51,712 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc66b42faaed28a8693a712966f73789, store=C 2024-12-13T21:30:51,712 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:51,716 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/05881bf49164435d9ba8a7c6eded8ed5 is 50, key is test_row_0/A:col10/1734125449877/Put/seqid=0 2024-12-13T21:30:51,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741956_1132 (size=12301) 2024-12-13T21:30:51,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-13T21:30:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-13T21:30:52,121 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/05881bf49164435d9ba8a7c6eded8ed5 2024-12-13T21:30:52,130 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/c9a799a477694f3a971cdf3742079349 is 50, key is test_row_0/B:col10/1734125449877/Put/seqid=0 2024-12-13T21:30:52,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741957_1133 (size=12301) 2024-12-13T21:30:52,536 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 
{event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/c9a799a477694f3a971cdf3742079349 2024-12-13T21:30:52,549 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/ffb937d223764d5382ec2d4337d7ac95 is 50, key is test_row_0/C:col10/1734125449877/Put/seqid=0 2024-12-13T21:30:52,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741958_1134 (size=12301) 2024-12-13T21:30:52,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-13T21:30:52,957 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/ffb937d223764d5382ec2d4337d7ac95 2024-12-13T21:30:52,968 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/A/05881bf49164435d9ba8a7c6eded8ed5 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/05881bf49164435d9ba8a7c6eded8ed5 2024-12-13T21:30:52,974 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/05881bf49164435d9ba8a7c6eded8ed5, entries=150, sequenceid=531, filesize=12.0 K 2024-12-13T21:30:52,975 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/B/c9a799a477694f3a971cdf3742079349 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c9a799a477694f3a971cdf3742079349 2024-12-13T21:30:52,980 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c9a799a477694f3a971cdf3742079349, entries=150, sequenceid=531, filesize=12.0 K 2024-12-13T21:30:52,981 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/.tmp/C/ffb937d223764d5382ec2d4337d7ac95 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ffb937d223764d5382ec2d4337d7ac95 2024-12-13T21:30:52,986 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ffb937d223764d5382ec2d4337d7ac95, entries=150, sequenceid=531, filesize=12.0 K 2024-12-13T21:30:52,987 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for cc66b42faaed28a8693a712966f73789 in 1276ms, sequenceid=531, compaction requested=true 2024-12-13T21:30:52,988 DEBUG [StoreCloser-TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fee04714259b47fcbc4b85477efaa914, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/38b39442bfe2498585495c7e87dac905, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/69d5c355c914456abbccfae606e2011b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/05c50dca0efb4334acc0bc60d46326cd, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/5b78f9661c45491a954f8a0cb0b231b1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/79b0dd5886314a87ae6407269eb59f98, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/ca54c30942a14781ab1bb8f397a58a77, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/7a1e30fe4c2641b89a9f10724a5cb670, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/d2140b6c38bc48cf88421c5e64197df8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/41c709ef665447858a589b52ff3d1131, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/3730fd47b68b453a84e48268b22aaf4f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/4a409301fc1b4ce6a7d8e84dba4cf05b, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/b8669f1a155445aa899424c9efccd830, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/a0f1d276bf244d76b167a605f83db961, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/ee2c3ae622af40268def7c7036fbae3f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/98c4ad085e9d4df486f34fcc0ff55e11, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/813673e8e5494c6f93e281b1d9718c5c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/883a9fb9cbee4110beb670a0ce3e58fc, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fc46f3bf9b934a0eb369bc496125558e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/123a956c21da476c8aa48d0d3b72ff96, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fc3ae0ae42a34013ab45b01768f3633d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/0aa6762a430c4d178e6827ce46c41974, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/1683cdfbf03b4a8fb3d07eafbe161f27, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/3020a5b9ff8c413491e14389c8bbc88c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/9bf69ef925fe42e1b340ce8fcb26b786, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/6b1508fcdf544259aa062214f6f2d77f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/9b60866015cc48b9889635117048b81f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/1a7180dc168a4e83996284b58de0bdb1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/350b63ecf4c142ec97d358ef2c1ea834, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/098b176a5e374c459db4d6f4c5effb31, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/61a27290bb22410593a46d524cd6add7, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/65e5b38949214608a89d0234f82bc844, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/b570db8db7d8449f8f87fca0b9a4adec, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/adb4978aaf6d4fc3a18575a6a95f4c7e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/cacc8c51e02a4b3aa51055ae8edffe7b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/62d8be8a59624a9ab83eedc2c95b8fc2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/a1bcb5ec8a714554b3ced9d51bd3e4b6] to archive 2024-12-13T21:30:52,992 DEBUG [StoreCloser-TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-13T21:30:53,000 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/79b0dd5886314a87ae6407269eb59f98 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/79b0dd5886314a87ae6407269eb59f98 2024-12-13T21:30:53,000 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/7a1e30fe4c2641b89a9f10724a5cb670 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/7a1e30fe4c2641b89a9f10724a5cb670 2024-12-13T21:30:53,000 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/38b39442bfe2498585495c7e87dac905 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/38b39442bfe2498585495c7e87dac905 2024-12-13T21:30:53,000 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/ca54c30942a14781ab1bb8f397a58a77 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/ca54c30942a14781ab1bb8f397a58a77 2024-12-13T21:30:53,001 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/69d5c355c914456abbccfae606e2011b to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/69d5c355c914456abbccfae606e2011b 2024-12-13T21:30:53,001 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/05c50dca0efb4334acc0bc60d46326cd to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/05c50dca0efb4334acc0bc60d46326cd 2024-12-13T21:30:53,001 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fee04714259b47fcbc4b85477efaa914 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fee04714259b47fcbc4b85477efaa914 2024-12-13T21:30:53,001 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/5b78f9661c45491a954f8a0cb0b231b1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/5b78f9661c45491a954f8a0cb0b231b1 2024-12-13T21:30:53,003 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/a0f1d276bf244d76b167a605f83db961 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/a0f1d276bf244d76b167a605f83db961 2024-12-13T21:30:53,003 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/41c709ef665447858a589b52ff3d1131 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/41c709ef665447858a589b52ff3d1131 2024-12-13T21:30:53,004 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/b8669f1a155445aa899424c9efccd830 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/b8669f1a155445aa899424c9efccd830 2024-12-13T21:30:53,004 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/ee2c3ae622af40268def7c7036fbae3f to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/ee2c3ae622af40268def7c7036fbae3f 2024-12-13T21:30:53,004 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/4a409301fc1b4ce6a7d8e84dba4cf05b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/4a409301fc1b4ce6a7d8e84dba4cf05b 2024-12-13T21:30:53,004 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/3730fd47b68b453a84e48268b22aaf4f to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/3730fd47b68b453a84e48268b22aaf4f 2024-12-13T21:30:53,005 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/98c4ad085e9d4df486f34fcc0ff55e11 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/98c4ad085e9d4df486f34fcc0ff55e11 2024-12-13T21:30:53,005 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/d2140b6c38bc48cf88421c5e64197df8 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/d2140b6c38bc48cf88421c5e64197df8 2024-12-13T21:30:53,006 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/883a9fb9cbee4110beb670a0ce3e58fc to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/883a9fb9cbee4110beb670a0ce3e58fc 2024-12-13T21:30:53,006 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fc3ae0ae42a34013ab45b01768f3633d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fc3ae0ae42a34013ab45b01768f3633d 2024-12-13T21:30:53,006 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/123a956c21da476c8aa48d0d3b72ff96 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/123a956c21da476c8aa48d0d3b72ff96 2024-12-13T21:30:53,006 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/813673e8e5494c6f93e281b1d9718c5c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/813673e8e5494c6f93e281b1d9718c5c 2024-12-13T21:30:53,006 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fc46f3bf9b934a0eb369bc496125558e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fc46f3bf9b934a0eb369bc496125558e 2024-12-13T21:30:53,006 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/0aa6762a430c4d178e6827ce46c41974 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/0aa6762a430c4d178e6827ce46c41974 2024-12-13T21:30:53,007 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/1683cdfbf03b4a8fb3d07eafbe161f27 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/1683cdfbf03b4a8fb3d07eafbe161f27 2024-12-13T21:30:53,007 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/3020a5b9ff8c413491e14389c8bbc88c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/3020a5b9ff8c413491e14389c8bbc88c 2024-12-13T21:30:53,008 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/9bf69ef925fe42e1b340ce8fcb26b786 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/9bf69ef925fe42e1b340ce8fcb26b786 2024-12-13T21:30:53,008 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/6b1508fcdf544259aa062214f6f2d77f to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/6b1508fcdf544259aa062214f6f2d77f 2024-12-13T21:30:53,008 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/9b60866015cc48b9889635117048b81f to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/9b60866015cc48b9889635117048b81f 2024-12-13T21:30:53,008 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/1a7180dc168a4e83996284b58de0bdb1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/1a7180dc168a4e83996284b58de0bdb1 2024-12-13T21:30:53,008 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/350b63ecf4c142ec97d358ef2c1ea834 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/350b63ecf4c142ec97d358ef2c1ea834 2024-12-13T21:30:53,009 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/098b176a5e374c459db4d6f4c5effb31 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/098b176a5e374c459db4d6f4c5effb31 2024-12-13T21:30:53,009 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/61a27290bb22410593a46d524cd6add7 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/61a27290bb22410593a46d524cd6add7 2024-12-13T21:30:53,009 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/65e5b38949214608a89d0234f82bc844 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/65e5b38949214608a89d0234f82bc844 2024-12-13T21:30:53,009 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/b570db8db7d8449f8f87fca0b9a4adec to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/b570db8db7d8449f8f87fca0b9a4adec 2024-12-13T21:30:53,009 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/adb4978aaf6d4fc3a18575a6a95f4c7e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/adb4978aaf6d4fc3a18575a6a95f4c7e 2024-12-13T21:30:53,010 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/cacc8c51e02a4b3aa51055ae8edffe7b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/cacc8c51e02a4b3aa51055ae8edffe7b 2024-12-13T21:30:53,010 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/62d8be8a59624a9ab83eedc2c95b8fc2 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/62d8be8a59624a9ab83eedc2c95b8fc2 2024-12-13T21:30:53,010 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/a1bcb5ec8a714554b3ced9d51bd3e4b6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/a1bcb5ec8a714554b3ced9d51bd3e4b6 2024-12-13T21:30:53,024 DEBUG [StoreCloser-TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/f12579fa62154ea88cea17b2dce6d562, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/bddfd5f0d68447f28966759f7685c2a3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/0cdeb1aec65d49c8bcbc27082055b6c5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/fdd65570bd4649aebbe6b2aff9442bd1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/d830452fd225448aae09307ecd6637b0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/a9beb0cb891e4ace9861f20b25009ec0, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/92b2a61b524342c385b6f512ebc1de97, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/37045db7b9c84c2dbfadb318d1e628ff, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/6eeaa293a0a44e469bcebf0eeecb620b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/9ebf28b2dfca42eca27ce7f401f9cb86, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/85c4d8f4bc964ba7ada97d9c7ec83d4b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/ce072c9edea342b687145d333f3ea2e7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/33cba21b846847a186991789f26b740b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/ce0669b387c74f6c9c72b5a9ec2aa96f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/77d92a7d49b34883aa1523005cf8459c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/21e64d47416d4c14a27842c43cfac3f9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/93f13f061f764663a893ce670c220dcf, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/a53e39b2c4934548b4eaf3defe758339, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/542b62f5d1534825a676343ad49de6d8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7fe62d5e6d3a4dd5b45e3b31f54b93e1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/1c586708b882456c8fb18275e95729d5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/9ce2d71a117d4803b8cd0dec1c5bea9b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7288437f174943e4a7b29a5a89958b7a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c9ca727a65914bb69dd8b3bf14cb8d74, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/f1bace814fb147f4a7b038687034bb7b, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/fc9d6de9e272494e873464b92725a2ab, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c893e7982d514cb5bdcc77dcdcd02bd7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/b7c8e94033a54538833ff1b15f8db39e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/62101a04052e41c581a4abbaf727ee9e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7864979fed3f48d0a1670f08837d26d8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c2472958e01f4cb982ed574dbc064128, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/20bd21644ed44cca878f43e7fafa4ea3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/8d447df0e6694764be7454af9a499bb3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/881a92e180ab46778ff98844bcb4ebe2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/32c0c426540341e7987f5cef0a6dd647, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/0e466b0a91c14835aa0feca8e6b9daae, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/cc612ef4819144dc9db02518003d9ad8] to archive 2024-12-13T21:30:53,026 DEBUG [StoreCloser-TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
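Note on the store-close entries above: the HStore closer lists every compacted store file to move, and the HFileArchiver records that follow show each move landing at the same relative path re-rooted under archive/. A minimal Java sketch of that data-to-archive path rewrite, using plain string handling rather than HBase's own HFileArchiver code (the class and method names below are hypothetical, for illustration only):

// Illustrative sketch only (not HBase's HFileArchiver): reproduces the path rewrite
// visible in the archiver lines above, where each store file under .../data/... is
// re-rooted under .../archive/data/... with the same table/region/family layout.
public final class ArchivePathSketch {

    // Rewrites ".../<root>/data/<ns>/<table>/<region>/<family>/<hfile>"
    // to the parallel ".../<root>/archive/data/..." location.
    static String toArchivePath(String storeFilePath) {
        int idx = storeFilePath.indexOf("/data/");
        if (idx < 0) {
            throw new IllegalArgumentException("not a store file path: " + storeFilePath);
        }
        return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
    }

    public static void main(String[] args) {
        String src = "hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05"
            + "/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/f12579fa62154ea88cea17b2dce6d562";
        // Prints the same archive destination the HFileArchiver log records for this file.
        System.out.println(toArchivePath(src));
    }
}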
2024-12-13T21:30:53,028 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/0cdeb1aec65d49c8bcbc27082055b6c5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/0cdeb1aec65d49c8bcbc27082055b6c5 2024-12-13T21:30:53,028 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/a9beb0cb891e4ace9861f20b25009ec0 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/a9beb0cb891e4ace9861f20b25009ec0 2024-12-13T21:30:53,028 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/bddfd5f0d68447f28966759f7685c2a3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/bddfd5f0d68447f28966759f7685c2a3 2024-12-13T21:30:53,028 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/fdd65570bd4649aebbe6b2aff9442bd1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/fdd65570bd4649aebbe6b2aff9442bd1 2024-12-13T21:30:53,028 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/d830452fd225448aae09307ecd6637b0 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/d830452fd225448aae09307ecd6637b0 2024-12-13T21:30:53,028 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/37045db7b9c84c2dbfadb318d1e628ff to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/37045db7b9c84c2dbfadb318d1e628ff 2024-12-13T21:30:53,028 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/92b2a61b524342c385b6f512ebc1de97 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/92b2a61b524342c385b6f512ebc1de97 2024-12-13T21:30:53,029 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/f12579fa62154ea88cea17b2dce6d562 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/f12579fa62154ea88cea17b2dce6d562 2024-12-13T21:30:53,030 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/9ebf28b2dfca42eca27ce7f401f9cb86 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/9ebf28b2dfca42eca27ce7f401f9cb86 2024-12-13T21:30:53,030 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/6eeaa293a0a44e469bcebf0eeecb620b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/6eeaa293a0a44e469bcebf0eeecb620b 2024-12-13T21:30:53,030 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/85c4d8f4bc964ba7ada97d9c7ec83d4b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/85c4d8f4bc964ba7ada97d9c7ec83d4b 2024-12-13T21:30:53,030 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/33cba21b846847a186991789f26b740b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/33cba21b846847a186991789f26b740b 2024-12-13T21:30:53,030 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/ce0669b387c74f6c9c72b5a9ec2aa96f to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/ce0669b387c74f6c9c72b5a9ec2aa96f 2024-12-13T21:30:53,030 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/77d92a7d49b34883aa1523005cf8459c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/77d92a7d49b34883aa1523005cf8459c 2024-12-13T21:30:53,031 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/ce072c9edea342b687145d333f3ea2e7 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/ce072c9edea342b687145d333f3ea2e7 2024-12-13T21:30:53,031 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/21e64d47416d4c14a27842c43cfac3f9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/21e64d47416d4c14a27842c43cfac3f9 2024-12-13T21:30:53,032 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7fe62d5e6d3a4dd5b45e3b31f54b93e1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7fe62d5e6d3a4dd5b45e3b31f54b93e1 2024-12-13T21:30:53,032 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/a53e39b2c4934548b4eaf3defe758339 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/a53e39b2c4934548b4eaf3defe758339 2024-12-13T21:30:53,032 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/9ce2d71a117d4803b8cd0dec1c5bea9b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/9ce2d71a117d4803b8cd0dec1c5bea9b 2024-12-13T21:30:53,033 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c9ca727a65914bb69dd8b3bf14cb8d74 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c9ca727a65914bb69dd8b3bf14cb8d74 2024-12-13T21:30:53,033 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/542b62f5d1534825a676343ad49de6d8 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/542b62f5d1534825a676343ad49de6d8 2024-12-13T21:30:53,033 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7288437f174943e4a7b29a5a89958b7a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7288437f174943e4a7b29a5a89958b7a 2024-12-13T21:30:53,033 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/93f13f061f764663a893ce670c220dcf to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/93f13f061f764663a893ce670c220dcf 2024-12-13T21:30:53,033 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/1c586708b882456c8fb18275e95729d5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/1c586708b882456c8fb18275e95729d5 2024-12-13T21:30:53,035 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/fc9d6de9e272494e873464b92725a2ab to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/fc9d6de9e272494e873464b92725a2ab 2024-12-13T21:30:53,035 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/f1bace814fb147f4a7b038687034bb7b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/f1bace814fb147f4a7b038687034bb7b 2024-12-13T21:30:53,035 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/b7c8e94033a54538833ff1b15f8db39e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/b7c8e94033a54538833ff1b15f8db39e 2024-12-13T21:30:53,035 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c893e7982d514cb5bdcc77dcdcd02bd7 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c893e7982d514cb5bdcc77dcdcd02bd7 2024-12-13T21:30:53,035 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/62101a04052e41c581a4abbaf727ee9e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/62101a04052e41c581a4abbaf727ee9e 2024-12-13T21:30:53,036 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c2472958e01f4cb982ed574dbc064128 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c2472958e01f4cb982ed574dbc064128 2024-12-13T21:30:53,036 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7864979fed3f48d0a1670f08837d26d8 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/7864979fed3f48d0a1670f08837d26d8 2024-12-13T21:30:53,036 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/20bd21644ed44cca878f43e7fafa4ea3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/20bd21644ed44cca878f43e7fafa4ea3 2024-12-13T21:30:53,037 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/8d447df0e6694764be7454af9a499bb3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/8d447df0e6694764be7454af9a499bb3 2024-12-13T21:30:53,038 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/0e466b0a91c14835aa0feca8e6b9daae to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/0e466b0a91c14835aa0feca8e6b9daae 2024-12-13T21:30:53,038 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/32c0c426540341e7987f5cef0a6dd647 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/32c0c426540341e7987f5cef0a6dd647 2024-12-13T21:30:53,038 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/cc612ef4819144dc9db02518003d9ad8 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/cc612ef4819144dc9db02518003d9ad8 2024-12-13T21:30:53,038 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/881a92e180ab46778ff98844bcb4ebe2 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/881a92e180ab46778ff98844bcb4ebe2 2024-12-13T21:30:53,039 DEBUG [StoreCloser-TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e3d41718a04943238e85358014990b29, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4bad27e334504d8b82a34e8b5e31ed7c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/aba6b079b2534447a90ab67c2c7636f9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/73d1837ece124332b5016056e297c9d0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/a4be1c4195d4410182fd51caefc018ca, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/42d84ac64f8b4e40a49e04bcf455fd5d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8c532a692517461590cadee3e189bc88, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/0f0130ee5fdb46ccb9855b579d823711, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/93c683220bee4983862b183770322871, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1b25e6415fe24d6785f84a92347eafc5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/35d91f5e558f4d2ca2a85c1098d91806, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/5fe546c27d084242ab7becb55c0b5260, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/160b33184d2445f7a76af6a7ea73fb05, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/f9d79e486b094ffcbbd024e5595df534, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8359d1745d824f1ebf43983f83e5a4b2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ad9be2d299a24acea784456da2a72b44, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e384c4131f944028a25fa03ca7cb9795, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/99ecf9940b204bd5a1d96d78670a6151, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/5899e9a29f254878a13b49fc117d0f0d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e9488cb704964ddbaebd250395afc725, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/baade9d899f241b9af09a5546a7fbcae, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/dffac29f815a4405bcf3cbd9b83cfa9c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4fcc7c0338004f4cad9bea1fb55b8bf1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/d829ec4afbdb4ae08d8509c7b8a53ed1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/6635f12916ba44c7865b987d33e61133, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/0e60e1ec265545eabab309887b314415, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/eef54cf2a1564b4a8b9b8367953fef25, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8981ffba90324a5aaf95ed81519f18b9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1799590e6869488b98bea2727d1b0293, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ecbf1f3af9004031b10f759ed32a6601, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/46d6b380c321406cb455aa88ea82ae8b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8780a0b09b5745da9938d6f5364279b2, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/a139a775b08e4e1fbeb270d550d35a84, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/c5fbcbc351774df2b17f33630889b40c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1c1b00b6f03647e5a7554f6e57a8f615, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/b7b5097b1e8a45929a25e10a5a6e5508, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4070514a410f40748caae50a9b04e9bf] to archive 2024-12-13T21:30:53,040 DEBUG [StoreCloser-TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-13T21:30:53,042 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e3d41718a04943238e85358014990b29 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e3d41718a04943238e85358014990b29 2024-12-13T21:30:53,042 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/73d1837ece124332b5016056e297c9d0 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/73d1837ece124332b5016056e297c9d0 2024-12-13T21:30:53,042 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/42d84ac64f8b4e40a49e04bcf455fd5d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/42d84ac64f8b4e40a49e04bcf455fd5d 2024-12-13T21:30:53,042 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/aba6b079b2534447a90ab67c2c7636f9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/aba6b079b2534447a90ab67c2c7636f9 2024-12-13T21:30:53,042 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/a4be1c4195d4410182fd51caefc018ca to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/a4be1c4195d4410182fd51caefc018ca 
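The close sequence above repeats the same archiving pass per column family (A, B, then C). When reviewing a run like this, a small throw-away parser can tally the moves per family; a minimal sketch, assuming the log has been saved to a file whose path is passed as the first argument (the class name ArchiveTally and the regex, tuned to this TestAcidGuarantees layout, are assumptions of the example, not part of the test suite):

// Illustrative sketch only: counts "Archived from FileableStoreFile" moves per
// column family (A/B/C) in a saved copy of a log like the one above.
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class ArchiveTally {
    // Matches the destination archive path and captures the column family directory.
    private static final Pattern ARCHIVE_DEST =
        Pattern.compile("to hdfs://\\S+/archive/data/default/TestAcidGuarantees/[0-9a-f]+/([ABC])/\\S+");

    public static void main(String[] args) throws IOException {
        Map<String, Integer> perFamily = new TreeMap<>();
        try (BufferedReader in = Files.newBufferedReader(Paths.get(args[0]))) {
            String line;
            while ((line = in.readLine()) != null) {
                // find() in a loop handles several archiver records flattened onto one physical line.
                Matcher m = ARCHIVE_DEST.matcher(line);
                while (m.find()) {
                    perFamily.merge(m.group(1), 1, Integer::sum);
                }
            }
        }
        perFamily.forEach((family, count) ->
            System.out.println("family " + family + ": " + count + " store files archived"));
    }
}

Run against a saved copy of this log, it prints one line per family with the number of store files moved to the archive during the region close.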
2024-12-13T21:30:53,042 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/0f0130ee5fdb46ccb9855b579d823711 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/0f0130ee5fdb46ccb9855b579d823711 2024-12-13T21:30:53,043 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8c532a692517461590cadee3e189bc88 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8c532a692517461590cadee3e189bc88 2024-12-13T21:30:53,042 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4bad27e334504d8b82a34e8b5e31ed7c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4bad27e334504d8b82a34e8b5e31ed7c 2024-12-13T21:30:53,044 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/5fe546c27d084242ab7becb55c0b5260 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/5fe546c27d084242ab7becb55c0b5260 2024-12-13T21:30:53,044 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/35d91f5e558f4d2ca2a85c1098d91806 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/35d91f5e558f4d2ca2a85c1098d91806 2024-12-13T21:30:53,044 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1b25e6415fe24d6785f84a92347eafc5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1b25e6415fe24d6785f84a92347eafc5 2024-12-13T21:30:53,044 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/160b33184d2445f7a76af6a7ea73fb05 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/160b33184d2445f7a76af6a7ea73fb05 2024-12-13T21:30:53,045 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/f9d79e486b094ffcbbd024e5595df534 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/f9d79e486b094ffcbbd024e5595df534 2024-12-13T21:30:53,045 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8359d1745d824f1ebf43983f83e5a4b2 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8359d1745d824f1ebf43983f83e5a4b2 2024-12-13T21:30:53,045 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ad9be2d299a24acea784456da2a72b44 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ad9be2d299a24acea784456da2a72b44 2024-12-13T21:30:53,045 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/93c683220bee4983862b183770322871 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/93c683220bee4983862b183770322871 2024-12-13T21:30:53,047 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e384c4131f944028a25fa03ca7cb9795 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e384c4131f944028a25fa03ca7cb9795 2024-12-13T21:30:53,047 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/baade9d899f241b9af09a5546a7fbcae to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/baade9d899f241b9af09a5546a7fbcae 2024-12-13T21:30:53,047 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4fcc7c0338004f4cad9bea1fb55b8bf1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4fcc7c0338004f4cad9bea1fb55b8bf1 2024-12-13T21:30:53,047 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/99ecf9940b204bd5a1d96d78670a6151 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/99ecf9940b204bd5a1d96d78670a6151 2024-12-13T21:30:53,047 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/dffac29f815a4405bcf3cbd9b83cfa9c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/dffac29f815a4405bcf3cbd9b83cfa9c 2024-12-13T21:30:53,047 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e9488cb704964ddbaebd250395afc725 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/e9488cb704964ddbaebd250395afc725 2024-12-13T21:30:53,048 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/5899e9a29f254878a13b49fc117d0f0d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/5899e9a29f254878a13b49fc117d0f0d 2024-12-13T21:30:53,048 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/d829ec4afbdb4ae08d8509c7b8a53ed1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/d829ec4afbdb4ae08d8509c7b8a53ed1 2024-12-13T21:30:53,049 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/6635f12916ba44c7865b987d33e61133 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/6635f12916ba44c7865b987d33e61133 2024-12-13T21:30:53,049 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/0e60e1ec265545eabab309887b314415 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/0e60e1ec265545eabab309887b314415 2024-12-13T21:30:53,050 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/eef54cf2a1564b4a8b9b8367953fef25 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/eef54cf2a1564b4a8b9b8367953fef25 2024-12-13T21:30:53,050 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8981ffba90324a5aaf95ed81519f18b9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8981ffba90324a5aaf95ed81519f18b9 2024-12-13T21:30:53,050 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1799590e6869488b98bea2727d1b0293 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1799590e6869488b98bea2727d1b0293 2024-12-13T21:30:53,050 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8780a0b09b5745da9938d6f5364279b2 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/8780a0b09b5745da9938d6f5364279b2 2024-12-13T21:30:53,050 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/46d6b380c321406cb455aa88ea82ae8b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/46d6b380c321406cb455aa88ea82ae8b 2024-12-13T21:30:53,050 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ecbf1f3af9004031b10f759ed32a6601 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ecbf1f3af9004031b10f759ed32a6601 2024-12-13T21:30:53,051 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/a139a775b08e4e1fbeb270d550d35a84 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/a139a775b08e4e1fbeb270d550d35a84 2024-12-13T21:30:53,051 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/c5fbcbc351774df2b17f33630889b40c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/c5fbcbc351774df2b17f33630889b40c 2024-12-13T21:30:53,051 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1c1b00b6f03647e5a7554f6e57a8f615 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/1c1b00b6f03647e5a7554f6e57a8f615 2024-12-13T21:30:53,051 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/b7b5097b1e8a45929a25e10a5a6e5508 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/b7b5097b1e8a45929a25e10a5a6e5508 2024-12-13T21:30:53,051 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4070514a410f40748caae50a9b04e9bf to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/4070514a410f40748caae50a9b04e9bf 2024-12-13T21:30:53,056 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/recovered.edits/534.seqid, newMaxSeqId=534, maxSeqId=1 2024-12-13T21:30:53,058 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789. 
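For reference, the store-file archiving logged above is a same-filesystem move from the region's data/ directory to a mirrored path under archive/. The following is a minimal, illustrative sketch of that move using the public Hadoop FileSystem API; it is not HBase's internal HFileArchiver (which additionally handles name collisions, retries and cleanup), and the connection setup is an assumption.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical client config; the namenode address matches the one in the log.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://localhost:34065"), conf);

    // One of the store files archived above; the archive path mirrors the data layout.
    Path src = new Path("/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05"
        + "/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C"
        + "/e3d41718a04943238e85358014990b29");
    Path dst = new Path(src.toString()
        .replaceFirst("/data/default/", "/archive/data/default/"));

    fs.mkdirs(dst.getParent());            // ensure archive/.../C exists
    boolean moved = fs.rename(src, dst);   // single-filesystem rename, as seen in the log
    System.out.println("archived " + src.getName() + ": " + moved);
  }
}
```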
2024-12-13T21:30:53,059 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for cc66b42faaed28a8693a712966f73789: 2024-12-13T21:30:53,060 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:53,061 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=cc66b42faaed28a8693a712966f73789, regionState=CLOSED 2024-12-13T21:30:53,063 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-13T21:30:53,063 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure cc66b42faaed28a8693a712966f73789, server=fd052dae32be,38989,1734125418878 in 1.5090 sec 2024-12-13T21:30:53,064 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-13T21:30:53,064 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc66b42faaed28a8693a712966f73789, UNASSIGN in 1.5130 sec 2024-12-13T21:30:53,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-13T21:30:53,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5230 sec 2024-12-13T21:30:53,067 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125453067"}]},"ts":"1734125453067"} 2024-12-13T21:30:53,068 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-13T21:30:53,115 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-13T21:30:53,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6120 sec 2024-12-13T21:30:53,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-13T21:30:53,617 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-12-13T21:30:53,626 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-12-13T21:30:53,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:30:53,633 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:30:53,635 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=38, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:30:53,635 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-13T21:30:53,637 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:53,643 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/recovered.edits] 2024-12-13T21:30:53,647 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/05881bf49164435d9ba8a7c6eded8ed5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/05881bf49164435d9ba8a7c6eded8ed5 2024-12-13T21:30:53,647 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fb8c5eecc76a4d709c28a19681295ee6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/fb8c5eecc76a4d709c28a19681295ee6 2024-12-13T21:30:53,647 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/2f5c524f66754d3a88026e5646725d5b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/A/2f5c524f66754d3a88026e5646725d5b 2024-12-13T21:30:53,650 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/16d66c4f7db148da8365eecb1b446d44 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/16d66c4f7db148da8365eecb1b446d44 2024-12-13T21:30:53,650 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/6fd4400b7d974c0092a5b171b7249d5d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/6fd4400b7d974c0092a5b171b7249d5d 
2024-12-13T21:30:53,650 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c9a799a477694f3a971cdf3742079349 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/B/c9a799a477694f3a971cdf3742079349 2024-12-13T21:30:53,654 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/f3284cec4ef24c47b1bf8854b3871c09 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/f3284cec4ef24c47b1bf8854b3871c09 2024-12-13T21:30:53,654 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/bc2c63f2a91a4ae0974b2a4fd692690c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/bc2c63f2a91a4ae0974b2a4fd692690c 2024-12-13T21:30:53,654 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ffb937d223764d5382ec2d4337d7ac95 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/C/ffb937d223764d5382ec2d4337d7ac95 2024-12-13T21:30:53,657 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/recovered.edits/534.seqid to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789/recovered.edits/534.seqid 2024-12-13T21:30:53,657 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/cc66b42faaed28a8693a712966f73789 2024-12-13T21:30:53,657 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-13T21:30:53,663 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=38, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:30:53,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-13T21:30:53,670 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-13T21:30:53,703 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
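The DISABLE and DELETE operations recorded here (DisableTableProcedure pid=34, DeleteTableProcedure pid=38, both driven from HBaseAdmin on the client) correspond to ordinary HBase Admin API calls. A minimal client-side sketch, assuming a standard hbase-site.xml on the classpath:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        if (!admin.isTableDisabled(table)) {
          admin.disableTable(table);  // blocks until the DisableTableProcedure finishes
        }
        admin.deleteTable(table);     // DeleteTableProcedure: archive regions, clean meta,
                                      // then drop the table descriptor
      }
    }
  }
}
```

Both calls block until the corresponding master procedure completes, which is why the log shows the client polling "Checking to see if procedure is done" until the procedure reports SUCCESS.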
2024-12-13T21:30:53,705 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=38, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:30:53,705 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-13T21:30:53,705 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734125453705"}]},"ts":"9223372036854775807"} 2024-12-13T21:30:53,708 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-13T21:30:53,708 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => cc66b42faaed28a8693a712966f73789, NAME => 'TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789.', STARTKEY => '', ENDKEY => ''}] 2024-12-13T21:30:53,709 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-13T21:30:53,709 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734125453709"}]},"ts":"9223372036854775807"} 2024-12-13T21:30:53,712 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-13T21:30:53,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-13T21:30:53,757 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=38, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:30:53,759 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 129 msec 2024-12-13T21:30:53,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-13T21:30:53,939 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 38 completed 2024-12-13T21:30:53,956 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=247 (was 219) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1a289b3d-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1a289b3d-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1126771990_22 at /127.0.0.1:56258 [Waiting for operation #285] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_656307843_22 at /127.0.0.1:54128 [Waiting for operation #208] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/fd052dae32be:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1a289b3d-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1a289b3d-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1126771990_22 at /127.0.0.1:48598 [Waiting for operation #262] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;fd052dae32be:38989-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=458 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=269 (was 154) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2826 (was 3413) 2024-12-13T21:30:53,965 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=247, OpenFileDescriptor=458, MaxFileDescriptor=1048576, SystemLoadAverage=269, ProcessCount=11, AvailableMemoryMB=2825 2024-12-13T21:30:53,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
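The TableDescriptorChecker warning just above fires because the test table carries MEMSTORE_FLUSHSIZE = 131072 bytes (128 KB), far below the 128 MB default. For orientation only, a minimal sketch of a descriptor that would trip this check, written against the stock HBase 2.x client builders; the table and family names are taken from this log, while the class name is invented for the sketch:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class SmallFlushSizeSketch {
      // Sketch: a descriptor whose MEMSTORE_FLUSHSIZE is 131072 bytes (128 KB),
      // the value the TableDescriptorChecker warning above complains about.
      // The default flush size is 128 MB, so anything this small means very frequent flushes.
      static TableDescriptor tinyFlushDescriptor() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setMemStoreFlushSize(128 * 1024)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("A")))
            .build();
      }
    }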
2024-12-13T21:30:53,967 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T21:30:53,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-13T21:30:53,968 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T21:30:53,969 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:53,969 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 39 2024-12-13T21:30:53,969 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T21:30:53,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-13T21:30:53,971 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T21:30:53,973 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60686, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T21:30:53,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741959_1135 (size=963) 2024-12-13T21:30:54,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-13T21:30:54,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-13T21:30:54,410 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 
e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05 2024-12-13T21:30:54,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741960_1136 (size=53) 2024-12-13T21:30:54,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-13T21:30:54,822 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:30:54,822 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing e2eceadaa1cf76613a4f5d367b5ca446, disabling compactions & flushes 2024-12-13T21:30:54,822 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:54,822 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:54,823 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. after waiting 0 ms 2024-12-13T21:30:54,823 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:54,823 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
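The create request above declares three column families (A, B, C) with a single version, 64 KB blocks, and the ADAPTIVE compacting memstore as a table attribute. As a rough client-side equivalent, here is an illustrative sketch using the standard HBase 2.x Admin API; the configuration and connection boilerplate is assumed rather than taken from this test, and the class name is invented:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Matches TABLE_ATTRIBUTES => METADATA => 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)                                    // VERSIONS => '1'
                .setBlocksize(64 * 1024)                              // BLOCKSIZE => '65536 B (64KB)'
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build());
          }
          admin.createTable(table.build());   // drives the CreateTableProcedure (pid=39 above)
        }
      }
    }

Setting the policy per family via setInMemoryCompaction mirrors the "compactor=ADAPTIVE" lines that appear when the stores open below.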
2024-12-13T21:30:54,823 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:30:54,825 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T21:30:54,825 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734125454825"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734125454825"}]},"ts":"1734125454825"} 2024-12-13T21:30:54,828 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-13T21:30:54,829 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T21:30:54,830 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125454829"}]},"ts":"1734125454829"} 2024-12-13T21:30:54,832 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-13T21:30:54,882 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2eceadaa1cf76613a4f5d367b5ca446, ASSIGN}] 2024-12-13T21:30:54,885 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2eceadaa1cf76613a4f5d367b5ca446, ASSIGN 2024-12-13T21:30:54,887 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2eceadaa1cf76613a4f5d367b5ca446, ASSIGN; state=OFFLINE, location=fd052dae32be,38989,1734125418878; forceNewPlan=false, retain=false 2024-12-13T21:30:55,038 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=e2eceadaa1cf76613a4f5d367b5ca446, regionState=OPENING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:30:55,042 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; OpenRegionProcedure e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:30:55,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-13T21:30:55,195 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:55,199 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:30:55,199 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7285): Opening region: {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} 2024-12-13T21:30:55,200 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:55,200 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:30:55,200 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7327): checking encryption for e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:55,200 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7330): checking classloading for e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:55,202 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:55,203 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:30:55,203 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2eceadaa1cf76613a4f5d367b5ca446 columnFamilyName A 2024-12-13T21:30:55,204 DEBUG [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:55,204 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.HStore(327): Store=e2eceadaa1cf76613a4f5d367b5ca446/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:30:55,204 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:55,206 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:30:55,207 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2eceadaa1cf76613a4f5d367b5ca446 columnFamilyName B 2024-12-13T21:30:55,207 DEBUG [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:55,208 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.HStore(327): Store=e2eceadaa1cf76613a4f5d367b5ca446/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:30:55,208 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:55,210 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:30:55,210 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2eceadaa1cf76613a4f5d367b5ca446 columnFamilyName C 2024-12-13T21:30:55,210 DEBUG [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:55,211 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.HStore(327): Store=e2eceadaa1cf76613a4f5d367b5ca446/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:30:55,211 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:55,212 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:55,213 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:55,215 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-13T21:30:55,216 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1085): writing seq id for e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:55,219 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T21:30:55,219 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1102): Opened e2eceadaa1cf76613a4f5d367b5ca446; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72451255, jitterRate=0.07960782945156097}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T21:30:55,220 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1001): Region open journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:30:55,220 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., pid=41, masterSystemTime=1734125455195 2024-12-13T21:30:55,222 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:55,222 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
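The FlushLargeStoresPolicy entry notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the TestAcidGuarantees descriptor, so it falls back to the region memstore flush size divided by the number of families (16.0 M here). If a test wanted to pin that bound explicitly, the key can be written straight into the descriptor; the sketch below is illustrative only, the 16 MB value simply mirrors the logged fallback, and the helper class is invented:

    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    final class FlushBoundSketch {
      // Sketch: pin the per-family flush lower bound in the table descriptor instead of
      // relying on the logged fallback (memstore flush size / number of families = 16.0 M).
      static TableDescriptor withExplicitLowerBound(TableDescriptor current) {
        return TableDescriptorBuilder.newBuilder(current)
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))   // hypothetical explicit 16 MB bound
            .build();
      }
    }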
2024-12-13T21:30:55,222 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=e2eceadaa1cf76613a4f5d367b5ca446, regionState=OPEN, openSeqNum=2, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:30:55,224 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-12-13T21:30:55,225 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; OpenRegionProcedure e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 in 181 msec 2024-12-13T21:30:55,226 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-12-13T21:30:55,226 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2eceadaa1cf76613a4f5d367b5ca446, ASSIGN in 342 msec 2024-12-13T21:30:55,227 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T21:30:55,227 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125455227"}]},"ts":"1734125455227"} 2024-12-13T21:30:55,228 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-13T21:30:55,275 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T21:30:55,277 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.3080 sec 2024-12-13T21:30:56,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-13T21:30:56,080 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-12-13T21:30:56,083 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46dc1373 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4359d2ac 2024-12-13T21:30:56,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dd2428, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:56,131 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:56,132 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36190, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:56,134 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-13T21:30:56,136 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60692, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-13T21:30:56,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-13T21:30:56,141 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.3 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T21:30:56,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=42, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-13T21:30:56,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741961_1137 (size=999) 2024-12-13T21:30:56,558 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-13T21:30:56,558 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-13T21:30:56,561 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-13T21:30:56,569 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2eceadaa1cf76613a4f5d367b5ca446, REOPEN/MOVE}] 2024-12-13T21:30:56,570 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2eceadaa1cf76613a4f5d367b5ca446, REOPEN/MOVE 2024-12-13T21:30:56,570 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=e2eceadaa1cf76613a4f5d367b5ca446, regionState=CLOSING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:30:56,571 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T21:30:56,571 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE; CloseRegionProcedure e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:30:56,723 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:56,724 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(124): Close e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:56,724 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T21:30:56,724 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1681): Closing e2eceadaa1cf76613a4f5d367b5ca446, disabling compactions & flushes 2024-12-13T21:30:56,724 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:56,724 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:56,724 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. after waiting 0 ms 2024-12-13T21:30:56,724 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
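The modify request above rewrites family 'A' as a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), which is what triggers the ReopenTableRegionsProcedure and the region close/reopen that follows. A client-side sketch of the same change against the standard 2.x Admin API, with connection setup omitted and the class name invented:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class EnableMobSketch {
      // Sketch: turn family 'A' into a MOB family with a 4-byte threshold,
      // mirroring the IS_MOB => 'true', MOB_THRESHOLD => '4' change logged above.
      static void enableMobOnA(Admin admin) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(tn);
        TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                .setMobEnabled(true)
                .setMobThreshold(4L)   // cells larger than 4 bytes go to MOB files
                .build())
            .build();
        admin.modifyTable(modified);   // drives ModifyTableProcedure and the reopen (pids 42-46 above)
      }
    }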
2024-12-13T21:30:56,732 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-13T21:30:56,733 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:56,733 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1635): Region close journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:30:56,733 WARN [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegionServer(3786): Not adding moved region record: e2eceadaa1cf76613a4f5d367b5ca446 to self. 2024-12-13T21:30:56,735 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(170): Closed e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:56,736 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=e2eceadaa1cf76613a4f5d367b5ca446, regionState=CLOSED 2024-12-13T21:30:56,739 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-12-13T21:30:56,740 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2eceadaa1cf76613a4f5d367b5ca446, REOPEN/MOVE; state=CLOSED, location=fd052dae32be,38989,1734125418878; forceNewPlan=false, retain=true 2024-12-13T21:30:56,740 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; CloseRegionProcedure e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 in 166 msec 2024-12-13T21:30:56,891 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=e2eceadaa1cf76613a4f5d367b5ca446, regionState=OPENING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:30:56,895 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=44, state=RUNNABLE; OpenRegionProcedure e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:30:57,049 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,056 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:30:57,056 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7285): Opening region: {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} 2024-12-13T21:30:57,056 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:57,057 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:30:57,057 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7327): checking encryption for e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:57,057 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7330): checking classloading for e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:57,060 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:57,061 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:30:57,066 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2eceadaa1cf76613a4f5d367b5ca446 columnFamilyName A 2024-12-13T21:30:57,069 DEBUG [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:57,069 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.HStore(327): Store=e2eceadaa1cf76613a4f5d367b5ca446/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:30:57,070 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:57,070 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:30:57,070 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2eceadaa1cf76613a4f5d367b5ca446 columnFamilyName B 2024-12-13T21:30:57,071 DEBUG [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:57,071 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.HStore(327): Store=e2eceadaa1cf76613a4f5d367b5ca446/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:30:57,071 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:57,072 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:30:57,072 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2eceadaa1cf76613a4f5d367b5ca446 columnFamilyName C 2024-12-13T21:30:57,072 DEBUG [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:57,072 INFO [StoreOpener-e2eceadaa1cf76613a4f5d367b5ca446-1 {}] regionserver.HStore(327): Store=e2eceadaa1cf76613a4f5d367b5ca446/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:30:57,073 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:57,073 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:57,074 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:57,075 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-13T21:30:57,077 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1085): writing seq id for e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:57,077 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1102): Opened e2eceadaa1cf76613a4f5d367b5ca446; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59184804, jitterRate=-0.11807769536972046}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T21:30:57,079 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1001): Region open journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:30:57,080 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., pid=46, masterSystemTime=1734125457049 2024-12-13T21:30:57,081 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:57,081 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
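With the reopen finished (the region comes back with next sequenceid=5), the serving descriptor now carries the MOB settings. A small, purely illustrative check a test could run to confirm that, again using only the stock client API; the class name is invented:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    final class VerifyMobSketch {
      // Sketch: confirm the reopened table descriptor now reports family 'A' as MOB-enabled.
      static boolean familyAIsMob(Admin admin) throws Exception {
        ColumnFamilyDescriptor a = admin.getDescriptor(TableName.valueOf("TestAcidGuarantees"))
            .getColumnFamily(Bytes.toBytes("A"));
        return a.isMobEnabled() && a.getMobThreshold() == 4L;
      }
    }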
2024-12-13T21:30:57,082 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=e2eceadaa1cf76613a4f5d367b5ca446, regionState=OPEN, openSeqNum=5, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,084 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=44 2024-12-13T21:30:57,084 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=44, state=SUCCESS; OpenRegionProcedure e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 in 188 msec 2024-12-13T21:30:57,086 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-13T21:30:57,086 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2eceadaa1cf76613a4f5d367b5ca446, REOPEN/MOVE in 515 msec 2024-12-13T21:30:57,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-12-13T21:30:57,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 526 msec 2024-12-13T21:30:57,092 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 947 msec 2024-12-13T21:30:57,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-12-13T21:30:57,101 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x748ab582 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a7bf7fd 2024-12-13T21:30:57,164 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ecfd53a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:57,167 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x28c904d8 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15736fcc 2024-12-13T21:30:57,182 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7eb70b3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:57,184 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x74be9bc0 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@24ebde20 2024-12-13T21:30:57,191 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c517130, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:57,193 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x776c0cb7 to 
127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@555bfdff 2024-12-13T21:30:57,199 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ec46f90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:57,200 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d5e0e3f to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62c6fdab 2024-12-13T21:30:57,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f63b68c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:57,209 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x250a1de4 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@473f181f 2024-12-13T21:30:57,215 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@681a05ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:57,217 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49456175 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@768577a2 2024-12-13T21:30:57,224 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e0829fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:57,226 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d919649 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60f4ce3e 2024-12-13T21:30:57,232 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f3c742a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:57,233 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6974f84e to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f375e60 2024-12-13T21:30:57,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23a6e288, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:30:57,246 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:30:57,246 DEBUG [hconnection-0xcb664c7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:57,247 DEBUG [hconnection-0x1a30796a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:57,247 DEBUG [hconnection-0x212cf3da-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:57,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-13T21:30:57,248 DEBUG [hconnection-0x724e4334-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:57,249 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:57,249 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:30:57,249 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36202, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:57,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-13T21:30:57,250 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36216, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:57,250 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:30:57,250 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:30:57,252 DEBUG [hconnection-0x6739b214-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:57,253 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36230, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:57,257 DEBUG [hconnection-0x3ced632b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:57,257 DEBUG [hconnection-0x2368b140-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:57,258 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36248, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-13T21:30:57,259 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36244, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:57,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:57,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:30:57,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:30:57,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:57,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:30:57,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:57,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:30:57,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:57,271 DEBUG [hconnection-0x4cbf8dae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:57,272 DEBUG [hconnection-0x1794862c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:30:57,277 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36284, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:57,277 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36260, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:57,277 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36270, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:30:57,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125517294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125517295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125517298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412135e7f73c12b16403cae35a1735d75d12e_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125457262/Put/seqid=0 2024-12-13T21:30:57,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125517303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125517303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741962_1138 (size=12154) 2024-12-13T21:30:57,332 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:57,337 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412135e7f73c12b16403cae35a1735d75d12e_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412135e7f73c12b16403cae35a1735d75d12e_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:57,339 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/2220731e59f540df82a11dd3ebb24893, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:30:57,348 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/2220731e59f540df82a11dd3ebb24893 is 175, key is test_row_0/A:col10/1734125457262/Put/seqid=0 2024-12-13T21:30:57,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-13T21:30:57,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741963_1139 (size=30955) 2024-12-13T21:30:57,402 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:57,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:57,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:57,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:57,403 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:57,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:57,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125517403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125517404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
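The block of WARN/DEBUG entries above is the write-blocking path: HRegion.checkResources rejects each Mutate with RegionTooBusyException while region e2eceadaa1cf76613a4f5d367b5ca446 sits over its 512.0 K memstore blocking limit, and callers are expected to back off until MemStoreFlusher.0 finishes the flush it started at 21:30:57,264. A minimal client-side sketch of that back-off, assuming a stock HBase 2.x client: the row, family and qualifier are taken from the keys in the log (test_row_0/A:col10), while the value and the explicit retry loop are illustrative only, and in practice the client's own retry policy usually absorbs this exception before it reaches application code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row, family and qualifier mirror the keys in the log; the value is made up.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException busy) {
              // Thrown while the region is over its memstore blocking limit (512.0 K above).
              if (attempt >= 5) {
                throw busy;
              }
              Thread.sleep(100L * attempt); // simple linear back-off while the flush drains the memstore
            }
          }
        }
      }
    }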
2024-12-13T21:30:57,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125517406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125517409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125517411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-13T21:30:57,556 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:57,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:57,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:57,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:57,557 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
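pid=47 is the FlushTableProcedure created for the "Client=jenkins//172.17.0.3 flush TestAcidGuarantees" request, and pid=48 is the FlushRegionProcedure it fans out to fd052dae32be,38989. Because MemStoreFlusher.0 already has the region mid-flush, FlushRegionCallable answers "NOT flushing ... as already flushing", the callable fails with the IOException above, and the master keeps re-dispatching pid=48 (21:30:57,402, 21:30:57,557, ...) until the in-flight flush completes. The originating request corresponds to an ordinary Admin.flush call; a minimal sketch, assuming the client-side configuration for this cluster is available:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Issues the table-level flush the master records as FlushTableProcedure (pid=47 above),
          // which in turn schedules per-region FlushRegionProcedure subprocedures (pid=48).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }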
2024-12-13T21:30:57,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:57,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:57,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125517606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125517607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125517608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125517613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125517613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,710 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:57,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:57,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:57,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:57,710 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:57,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:57,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:57,771 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/2220731e59f540df82a11dd3ebb24893 2024-12-13T21:30:57,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/c6c7104348064a149cd5d7e4830d15d4 is 50, key is test_row_0/B:col10/1734125457262/Put/seqid=0 2024-12-13T21:30:57,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741964_1140 (size=12001) 2024-12-13T21:30:57,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/c6c7104348064a149cd5d7e4830d15d4 2024-12-13T21:30:57,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/48289310d9d04cd889b68fd3694e44ad is 50, key is test_row_0/C:col10/1734125457262/Put/seqid=0 2024-12-13T21:30:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-13T21:30:57,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741965_1141 (size=12001) 2024-12-13T21:30:57,854 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/48289310d9d04cd889b68fd3694e44ad 2024-12-13T21:30:57,862 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:57,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:57,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:57,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:57,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/2220731e59f540df82a11dd3ebb24893 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/2220731e59f540df82a11dd3ebb24893 2024-12-13T21:30:57,863 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:57,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
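The repeated pid=48 failures above are the master-requested flush colliding with the flush the MemStoreFlusher already has in flight: the region answers "NOT flushing ... as already flushing", FlushRegionCallable turns that into an IOException, and the failure is reported back to the master, which re-dispatches the procedure until it eventually succeeds. A table-level flush can also be requested from client code through the Admin API; the sketch below is a minimal, hedged illustration of asking for a flush and retrying on failure, not the test's actual code. Only the table name comes from the log; the connection setup, class name, retry count, and backoff are assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

import java.io.IOException;

public class FlushWithRetry {
    public static void main(String[] args) throws Exception {
        // Connects using whatever hbase-site.xml is on the classpath.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Retry defensively: a flush request can fail while another flush of the
            // same region is still in progress, as the log entries above show.
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    admin.flush(table);          // ask the servers to flush the table's regions
                    break;                       // flush request accepted
                } catch (IOException e) {
                    Thread.sleep(1000L * attempt);  // simple linear backoff between attempts
                }
            }
        }
    }
}

In practice the master's flush procedure already performs this kind of re-dispatch on its own, which is exactly what the surrounding pid=48 entries record.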
2024-12-13T21:30:57,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
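After the master records the RemoteProcedureException, the region server's own flush keeps going (the A, B and C store files are committed in the entries below), while new writes back up until the memstore crosses its blocking limit and puts start failing with RegionTooBusyException ("Over memstore limit=512.0 K"). That blocking limit is normally the memstore flush size multiplied by the block multiplier (hbase.hregion.memstore.flush.size x hbase.hregion.memstore.block.multiplier). The stock HBase client generally retries these calls itself; the hedged sketch below only makes the retry-with-backoff pattern explicit. The row key, family and qualifier echo the keys visible in the log; the class name, cell value and backoff numbers are placeholders.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            while (true) {
                try {
                    table.put(put);
                    break;                                        // write accepted
                } catch (RegionTooBusyException e) {
                    // Memstore is above its blocking limit; wait for the flush to drain it.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 10_000);  // exponential backoff, capped
                }
            }
        }
    }
}

Once the flush recorded later in the log completes and the memstore drains back under the limit, the retried puts should go through.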
2024-12-13T21:30:57,871 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/2220731e59f540df82a11dd3ebb24893, entries=150, sequenceid=16, filesize=30.2 K 2024-12-13T21:30:57,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/c6c7104348064a149cd5d7e4830d15d4 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/c6c7104348064a149cd5d7e4830d15d4 2024-12-13T21:30:57,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/c6c7104348064a149cd5d7e4830d15d4, entries=150, sequenceid=16, filesize=11.7 K 2024-12-13T21:30:57,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/48289310d9d04cd889b68fd3694e44ad as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/48289310d9d04cd889b68fd3694e44ad 2024-12-13T21:30:57,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/48289310d9d04cd889b68fd3694e44ad, entries=150, sequenceid=16, filesize=11.7 K 2024-12-13T21:30:57,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for e2eceadaa1cf76613a4f5d367b5ca446 in 630ms, sequenceid=16, compaction requested=false 2024-12-13T21:30:57,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:30:57,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:57,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-13T21:30:57,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:30:57,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:57,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:30:57,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:57,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:30:57,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-12-13T21:30:57,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125517960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125517964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125517967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:57,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125517962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125517966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:57,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213b10f3baef3304e4b8c357ae140a3a776_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125457925/Put/seqid=0 2024-12-13T21:30:57,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741966_1142 (size=12154) 2024-12-13T21:30:58,016 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:58,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:58,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125518068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125518069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125518070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125518070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125518074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,168 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:58,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:58,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125518272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125518273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125518273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125518273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125518279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,321 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:58,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:58,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,322 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-13T21:30:58,398 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:58,404 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213b10f3baef3304e4b8c357ae140a3a776_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b10f3baef3304e4b8c357ae140a3a776_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:58,405 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/02c46d57e7764fb88ce406baaeeb2ef0, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:30:58,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/02c46d57e7764fb88ce406baaeeb2ef0 is 175, key is test_row_0/A:col10/1734125457925/Put/seqid=0 2024-12-13T21:30:58,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741967_1143 (size=30955) 2024-12-13T21:30:58,413 INFO [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=44, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/02c46d57e7764fb88ce406baaeeb2ef0 2024-12-13T21:30:58,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/6b5bd8750e184fb580374498f2ae20f1 is 50, key is test_row_0/B:col10/1734125457925/Put/seqid=0 2024-12-13T21:30:58,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741968_1144 (size=12001) 2024-12-13T21:30:58,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/6b5bd8750e184fb580374498f2ae20f1 2024-12-13T21:30:58,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/1b5cfdaad07d4a399b9d370b4efa8c91 is 50, key is test_row_0/C:col10/1734125457925/Put/seqid=0 2024-12-13T21:30:58,474 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:58,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:58,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741969_1145 (size=12001) 2024-12-13T21:30:58,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,477 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/1b5cfdaad07d4a399b9d370b4efa8c91 2024-12-13T21:30:58,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/02c46d57e7764fb88ce406baaeeb2ef0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/02c46d57e7764fb88ce406baaeeb2ef0 2024-12-13T21:30:58,493 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/02c46d57e7764fb88ce406baaeeb2ef0, entries=150, sequenceid=44, filesize=30.2 K 2024-12-13T21:30:58,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/6b5bd8750e184fb580374498f2ae20f1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/6b5bd8750e184fb580374498f2ae20f1 2024-12-13T21:30:58,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/6b5bd8750e184fb580374498f2ae20f1, entries=150, sequenceid=44, 
filesize=11.7 K 2024-12-13T21:30:58,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/1b5cfdaad07d4a399b9d370b4efa8c91 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/1b5cfdaad07d4a399b9d370b4efa8c91 2024-12-13T21:30:58,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/1b5cfdaad07d4a399b9d370b4efa8c91, entries=150, sequenceid=44, filesize=11.7 K 2024-12-13T21:30:58,509 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for e2eceadaa1cf76613a4f5d367b5ca446 in 554ms, sequenceid=44, compaction requested=false 2024-12-13T21:30:58,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:30:58,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:58,579 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-13T21:30:58,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:30:58,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:58,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:30:58,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:58,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:30:58,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:58,594 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213cee8f5691d0e4b6a90644dc9ec9557d8_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125458578/Put/seqid=0 2024-12-13T21:30:58,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125518605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125518604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,611 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-13T21:30:58,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125518609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125518609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125518609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:58,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:58,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
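The pid=48 entries above all repeat one pattern: the master re-dispatches its flush procedure to fd052dae32be,38989, the region server finds the region already being flushed by MemStoreFlusher.0 ("NOT flushing ... as already flushing"), FlushRegionCallable fails with "Unable to complete flush", and HMaster(4114) records the remote procedure failure before trying again. The repeated IOExceptions therefore read as retry contention around an in-flight flush rather than a storage failure. A flush like this is normally requested through the Admin API; the following is a minimal sketch of such a trigger, assumed rather than taken from the test source (only the table name comes from the log), showing the client call that ends up as FlushRegionCallable executions such as pid=47/48.

    // Minimal sketch, assumed rather than taken from the test source: asking the
    // master to flush the table seen in this log. The master drives this as flush
    // procedures (like pid=47/48 above) whose per-region work runs on the region
    // server as FlushRegionCallable.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTrigger {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Request a flush of all column families of the test table.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
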
2024-12-13T21:30:58,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741970_1146 (size=19474) 2024-12-13T21:30:58,634 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:58,641 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213cee8f5691d0e4b6a90644dc9ec9557d8_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213cee8f5691d0e4b6a90644dc9ec9557d8_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:58,643 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/1bc9242bbfbd4893adae6c6a141cb1f1, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:30:58,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/1bc9242bbfbd4893adae6c6a141cb1f1 is 175, key is test_row_0/A:col10/1734125458578/Put/seqid=0 2024-12-13T21:30:58,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741971_1147 (size=56733) 2024-12-13T21:30:58,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125518711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125518711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125518720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125518719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125518721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,782 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:58,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:58,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125518914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125518915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125518923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125518924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:58,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125518924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,935 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:58,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:58,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:58,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:58,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:58,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:59,076 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=56, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/1bc9242bbfbd4893adae6c6a141cb1f1 2024-12-13T21:30:59,088 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:59,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:59,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:59,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
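Note on the repeated RegionTooBusyException entries above: the region server is rejecting Mutate calls because the region's memstore has grown past its blocking limit of 512.0 K while the flush is still in progress. As a rough, hedged illustration only (the concrete values below are assumptions for this sketch, not read from this test's configuration), the blocking limit comes from the per-region flush size multiplied by the block multiplier; both configuration keys are standard HBase settings.

// Hypothetical sketch: how a 512 K blocking limit could arise.
// hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier are
// real HBase settings; the concrete values here are assumed for illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 K (assumed test value)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x the flush size

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;                   // 4 * 128 K = 512 K
        System.out.println("Puts are rejected with RegionTooBusyException above " + blockingLimit + " bytes");
    }
}

Clients normally see these rejections only as extra retries; the puts succeed once the flush visible in the surrounding log lines drains the memstore back under that limit.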
2024-12-13T21:30:59,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:59,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:59,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:59,092 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/1c0d252fd85f4fe7bf9f614b11b70aa3 is 50, key is test_row_0/B:col10/1734125458578/Put/seqid=0 2024-12-13T21:30:59,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741972_1148 (size=12001) 2024-12-13T21:30:59,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125519217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125519223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125519226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125519227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125519227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,241 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:59,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:59,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:59,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:59,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:59,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:59,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:59,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-13T21:30:59,393 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,394 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:59,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:59,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:59,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:59,394 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:59,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:59,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:59,511 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/1c0d252fd85f4fe7bf9f614b11b70aa3 2024-12-13T21:30:59,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/98cca9fc87bd4cdd91692a8e21edd0bb is 50, key is test_row_0/C:col10/1734125458578/Put/seqid=0 2024-12-13T21:30:59,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741973_1149 (size=12001) 2024-12-13T21:30:59,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/98cca9fc87bd4cdd91692a8e21edd0bb 2024-12-13T21:30:59,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/1bc9242bbfbd4893adae6c6a141cb1f1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/1bc9242bbfbd4893adae6c6a141cb1f1 2024-12-13T21:30:59,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/1bc9242bbfbd4893adae6c6a141cb1f1, entries=300, sequenceid=56, filesize=55.4 K 2024-12-13T21:30:59,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/1c0d252fd85f4fe7bf9f614b11b70aa3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/1c0d252fd85f4fe7bf9f614b11b70aa3 2024-12-13T21:30:59,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/1c0d252fd85f4fe7bf9f614b11b70aa3, entries=150, sequenceid=56, filesize=11.7 K 2024-12-13T21:30:59,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/98cca9fc87bd4cdd91692a8e21edd0bb as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/98cca9fc87bd4cdd91692a8e21edd0bb 2024-12-13T21:30:59,547 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,547 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:59,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:59,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:30:59,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:59,547 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:59,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:59,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:30:59,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/98cca9fc87bd4cdd91692a8e21edd0bb, entries=150, sequenceid=56, filesize=11.7 K 2024-12-13T21:30:59,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for e2eceadaa1cf76613a4f5d367b5ca446 in 977ms, sequenceid=56, compaction requested=true 2024-12-13T21:30:59,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:30:59,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:30:59,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:59,556 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:59,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:30:59,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:30:59,556 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:30:59,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:30:59,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:30:59,558 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:59,558 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/A is initiating minor compaction (all files) 
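The "selected 3 files of size 118643 ... with 1 in ratio" decision above comes from exploring compaction selection, which accepts a candidate set only when every file is at most the compaction ratio times the combined size of the other files in the set. A simplified, stand-alone sketch of that check follows; it mirrors the idea behind ExploringCompactionPolicy#filesInRatio but is not the actual HBase implementation, and the ratio of 1.2 is the usual default, assumed here rather than taken from this test's configuration.

// Simplified sketch of the "in ratio" test used when picking compaction candidates.
public class InRatioSketch {
    static boolean filesInRatio(double[] fileSizesKb, double ratio) {
        double total = 0;
        for (double size : fileSizesKb) {
            total += size;
        }
        for (double size : fileSizesKb) {
            // A single file much larger than the rest of the set breaks the ratio.
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the three A-family files listed just below:
        // 30.2 K, 30.2 K and 55.4 K, about 115.9 K (118,643 bytes) in total.
        double[] storeFilesKb = {30.2, 30.2, 55.4};
        System.out.println(filesInRatio(storeFilesKb, 1.2)); // true: 55.4 <= 1.2 * (30.2 + 30.2)
    }
}

With these sizes the whole set passes the test, so all three A-family HFiles are compacted together in the minor compaction that starts in the next log entries.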
2024-12-13T21:30:59,559 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/A in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:59,559 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/2220731e59f540df82a11dd3ebb24893, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/02c46d57e7764fb88ce406baaeeb2ef0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/1bc9242bbfbd4893adae6c6a141cb1f1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=115.9 K 2024-12-13T21:30:59,559 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:59,559 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/2220731e59f540df82a11dd3ebb24893, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/02c46d57e7764fb88ce406baaeeb2ef0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/1bc9242bbfbd4893adae6c6a141cb1f1] 2024-12-13T21:30:59,559 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:30:59,560 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/B is initiating minor compaction (all files) 2024-12-13T21:30:59,560 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/B in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:30:59,560 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/c6c7104348064a149cd5d7e4830d15d4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/6b5bd8750e184fb580374498f2ae20f1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/1c0d252fd85f4fe7bf9f614b11b70aa3] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=35.2 K 2024-12-13T21:30:59,560 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting c6c7104348064a149cd5d7e4830d15d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734125457259 2024-12-13T21:30:59,560 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2220731e59f540df82a11dd3ebb24893, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734125457259 2024-12-13T21:30:59,561 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02c46d57e7764fb88ce406baaeeb2ef0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1734125457920 2024-12-13T21:30:59,561 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b5bd8750e184fb580374498f2ae20f1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1734125457920 2024-12-13T21:30:59,561 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bc9242bbfbd4893adae6c6a141cb1f1, keycount=300, bloomtype=ROW, size=55.4 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1734125457959 2024-12-13T21:30:59,561 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c0d252fd85f4fe7bf9f614b11b70aa3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1734125458576 2024-12-13T21:30:59,573 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#B#compaction#129 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:59,574 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/244a4c55882d4988957fcb3776a5f012 is 50, key is test_row_0/B:col10/1734125458578/Put/seqid=0 2024-12-13T21:30:59,575 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:30:59,579 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241213620121a183c34c108c678c3fe49160cb_e2eceadaa1cf76613a4f5d367b5ca446 store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:30:59,583 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241213620121a183c34c108c678c3fe49160cb_e2eceadaa1cf76613a4f5d367b5ca446, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:30:59,583 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213620121a183c34c108c678c3fe49160cb_e2eceadaa1cf76613a4f5d367b5ca446 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:30:59,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741974_1150 (size=12104) 2024-12-13T21:30:59,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741975_1151 (size=4469) 2024-12-13T21:30:59,602 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#A#compaction#130 average throughput is 0.90 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:30:59,604 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/39eff9ba4308430fb7d7ebaaae184a73 is 175, key is test_row_0/A:col10/1734125458578/Put/seqid=0 2024-12-13T21:30:59,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741976_1152 (size=31058) 2024-12-13T21:30:59,700 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-13T21:30:59,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:30:59,701 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-13T21:30:59,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:30:59,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:59,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:30:59,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:59,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:30:59,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:30:59,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213c8b237b75777419b8567ce7451664a7a_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125458604/Put/seqid=0 2024-12-13T21:30:59,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:59,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
as already flushing 2024-12-13T21:30:59,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741977_1153 (size=12154) 2024-12-13T21:30:59,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:30:59,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125519737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125519738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125519738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125519740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,748 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213c8b237b75777419b8567ce7451664a7a_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213c8b237b75777419b8567ce7451664a7a_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:30:59,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/589862ff29c44d6991284757003c712d, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:30:59,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/589862ff29c44d6991284757003c712d is 175, key is test_row_0/A:col10/1734125458604/Put/seqid=0 2024-12-13T21:30:59,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125519751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741978_1154 (size=30955) 2024-12-13T21:30:59,788 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=80, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/589862ff29c44d6991284757003c712d 2024-12-13T21:30:59,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/8ceef386b8a840ffa83e060b3404a0c4 is 50, key is test_row_0/B:col10/1734125458604/Put/seqid=0 2024-12-13T21:30:59,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741979_1155 (size=12001) 2024-12-13T21:30:59,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125519841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125519842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125519843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:30:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125519843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:30:59,994 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/244a4c55882d4988957fcb3776a5f012 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/244a4c55882d4988957fcb3776a5f012 2024-12-13T21:31:00,002 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/B of e2eceadaa1cf76613a4f5d367b5ca446 into 244a4c55882d4988957fcb3776a5f012(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
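Note on the repeated "Over memstore limit=512.0 K" rejections above: HRegion.checkResources blocks new mutations once a region's memstore reaches the flush size multiplied by the block multiplier, both of which are ordinary configuration keys. The exact values used in this run are not shown in the log, so the sketch below is only an illustration of how a 512 KB blocking limit could arise (assuming, for example, a deliberately small 128 KB test flush size); the key names are standard, the numbers are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        // Standard HBase configuration keys; the values here are illustrative only.
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches this many bytes
        // (128 KB is an assumed, test-sized value, not taken from this log).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Updates are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier: 128 KB * 4 = 512 KB, which would match the
        // "Over memstore limit=512.0 K" messages in the surrounding log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes"); // 524288
      }
    }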
2024-12-13T21:31:00,002 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:00,002 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/B, priority=13, startTime=1734125459556; duration=0sec 2024-12-13T21:31:00,002 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:00,002 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:B 2024-12-13T21:31:00,002 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:00,003 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:00,003 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/C is initiating minor compaction (all files) 2024-12-13T21:31:00,004 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/C in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:00,004 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/48289310d9d04cd889b68fd3694e44ad, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/1b5cfdaad07d4a399b9d370b4efa8c91, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/98cca9fc87bd4cdd91692a8e21edd0bb] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=35.2 K 2024-12-13T21:31:00,004 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 48289310d9d04cd889b68fd3694e44ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734125457259 2024-12-13T21:31:00,005 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b5cfdaad07d4a399b9d370b4efa8c91, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1734125457920 2024-12-13T21:31:00,006 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 98cca9fc87bd4cdd91692a8e21edd0bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1734125458576 2024-12-13T21:31:00,028 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/39eff9ba4308430fb7d7ebaaae184a73 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/39eff9ba4308430fb7d7ebaaae184a73 2024-12-13T21:31:00,032 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#C#compaction#133 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:00,033 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/369ad4107d984985bd4ac7b7fc15895e is 50, key is test_row_0/C:col10/1734125458578/Put/seqid=0 2024-12-13T21:31:00,044 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/A of e2eceadaa1cf76613a4f5d367b5ca446 into 39eff9ba4308430fb7d7ebaaae184a73(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:00,045 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:00,045 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/A, priority=13, startTime=1734125459556; duration=0sec 2024-12-13T21:31:00,045 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:00,045 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:A 2024-12-13T21:31:00,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125520045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125520046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125520047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125520051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741980_1156 (size=12104) 2024-12-13T21:31:00,225 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/8ceef386b8a840ffa83e060b3404a0c4 2024-12-13T21:31:00,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/4fdce5a31de144cabe6948ca5e0cf9e2 is 50, key is test_row_0/C:col10/1734125458604/Put/seqid=0 2024-12-13T21:31:00,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741981_1157 (size=12001) 2024-12-13T21:31:00,250 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/4fdce5a31de144cabe6948ca5e0cf9e2 2024-12-13T21:31:00,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/589862ff29c44d6991284757003c712d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/589862ff29c44d6991284757003c712d 2024-12-13T21:31:00,262 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/589862ff29c44d6991284757003c712d, entries=150, sequenceid=80, filesize=30.2 K 2024-12-13T21:31:00,264 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/8ceef386b8a840ffa83e060b3404a0c4 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/8ceef386b8a840ffa83e060b3404a0c4 2024-12-13T21:31:00,268 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/8ceef386b8a840ffa83e060b3404a0c4, entries=150, sequenceid=80, filesize=11.7 K 2024-12-13T21:31:00,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/4fdce5a31de144cabe6948ca5e0cf9e2 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/4fdce5a31de144cabe6948ca5e0cf9e2 2024-12-13T21:31:00,275 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/4fdce5a31de144cabe6948ca5e0cf9e2, entries=150, sequenceid=80, filesize=11.7 K 2024-12-13T21:31:00,276 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for e2eceadaa1cf76613a4f5d367b5ca446 in 575ms, sequenceid=80, compaction requested=false 2024-12-13T21:31:00,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:00,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
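Note on the flush that completes above: while the memstore is over its blocking limit, writers see RegionTooBusyException until the flush drains the region, after which puts succeed again. The stock HBase client normally retries this exception internally, so the sketch below is only a minimal illustration of explicit retry-with-backoff around Table.put, assuming the exception surfaces directly to the caller; the table name and cell values mirror the test data in this log but are otherwise illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);   // rejected while the region is over its blocking memstore limit
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) throw e;   // give up after a few tries
              Thread.sleep(backoffMs);     // wait for the flush to drain the memstore
              backoffMs *= 2;              // exponential backoff
            }
          }
        }
      }
    }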
2024-12-13T21:31:00,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-13T21:31:00,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-13T21:31:00,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-13T21:31:00,279 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0270 sec 2024-12-13T21:31:00,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 3.0330 sec 2024-12-13T21:31:00,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:00,352 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-13T21:31:00,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:00,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:00,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:00,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:00,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:00,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:00,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121358cee122f5b7471d8aa02375531ff178_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125459738/Put/seqid=0 2024-12-13T21:31:00,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741982_1158 (size=14594) 2024-12-13T21:31:00,371 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:00,379 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121358cee122f5b7471d8aa02375531ff178_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121358cee122f5b7471d8aa02375531ff178_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:00,380 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/3a7563e785b14cff886ec3e8df0338c6, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:00,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/3a7563e785b14cff886ec3e8df0338c6 is 175, key is test_row_0/A:col10/1734125459738/Put/seqid=0 2024-12-13T21:31:00,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125520381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125520382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125520387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741983_1159 (size=39549) 2024-12-13T21:31:00,402 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/3a7563e785b14cff886ec3e8df0338c6 2024-12-13T21:31:00,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125520401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/b22ada1f0b6e462f86ff619b665c9214 is 50, key is test_row_0/B:col10/1734125459738/Put/seqid=0 2024-12-13T21:31:00,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741984_1160 (size=12001) 2024-12-13T21:31:00,474 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/369ad4107d984985bd4ac7b7fc15895e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/369ad4107d984985bd4ac7b7fc15895e 2024-12-13T21:31:00,479 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/C of e2eceadaa1cf76613a4f5d367b5ca446 into 369ad4107d984985bd4ac7b7fc15895e(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:00,479 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:00,479 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/C, priority=13, startTime=1734125459556; duration=0sec 2024-12-13T21:31:00,479 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:00,479 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:C 2024-12-13T21:31:00,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125520489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125520489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125520492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125520505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125520693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125520693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125520695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125520708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:00,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125520764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:00,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/b22ada1f0b6e462f86ff619b665c9214 2024-12-13T21:31:00,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/3c683a97cfc847d9822308373696c51c is 50, key is test_row_0/C:col10/1734125459738/Put/seqid=0 2024-12-13T21:31:00,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741985_1161 (size=12001) 2024-12-13T21:31:00,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/3c683a97cfc847d9822308373696c51c 2024-12-13T21:31:00,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/3a7563e785b14cff886ec3e8df0338c6 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/3a7563e785b14cff886ec3e8df0338c6 2024-12-13T21:31:00,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/3a7563e785b14cff886ec3e8df0338c6, entries=200, sequenceid=95, filesize=38.6 K 2024-12-13T21:31:00,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/b22ada1f0b6e462f86ff619b665c9214 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/b22ada1f0b6e462f86ff619b665c9214 2024-12-13T21:31:00,845 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/b22ada1f0b6e462f86ff619b665c9214, entries=150, sequenceid=95, filesize=11.7 K 2024-12-13T21:31:00,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/3c683a97cfc847d9822308373696c51c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/3c683a97cfc847d9822308373696c51c 2024-12-13T21:31:00,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/3c683a97cfc847d9822308373696c51c, entries=150, sequenceid=95, filesize=11.7 K 2024-12-13T21:31:00,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for e2eceadaa1cf76613a4f5d367b5ca446 in 500ms, sequenceid=95, compaction requested=true 2024-12-13T21:31:00,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:00,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:31:00,853 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:00,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:00,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:00,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:00,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:00,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-13T21:31:00,853 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store 
files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:00,854 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:00,854 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/A is initiating minor compaction (all files) 2024-12-13T21:31:00,854 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:00,854 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/B is initiating minor compaction (all files) 2024-12-13T21:31:00,854 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/A in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:00,854 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/B in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:00,854 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/39eff9ba4308430fb7d7ebaaae184a73, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/589862ff29c44d6991284757003c712d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/3a7563e785b14cff886ec3e8df0338c6] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=99.2 K 2024-12-13T21:31:00,854 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:00,855 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/244a4c55882d4988957fcb3776a5f012, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/8ceef386b8a840ffa83e060b3404a0c4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/b22ada1f0b6e462f86ff619b665c9214] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=35.3 K 2024-12-13T21:31:00,855 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/39eff9ba4308430fb7d7ebaaae184a73, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/589862ff29c44d6991284757003c712d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/3a7563e785b14cff886ec3e8df0338c6] 2024-12-13T21:31:00,855 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 39eff9ba4308430fb7d7ebaaae184a73, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1734125458576 2024-12-13T21:31:00,855 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 244a4c55882d4988957fcb3776a5f012, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1734125458576 2024-12-13T21:31:00,855 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 589862ff29c44d6991284757003c712d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1734125458595 2024-12-13T21:31:00,855 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ceef386b8a840ffa83e060b3404a0c4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1734125458595 2024-12-13T21:31:00,856 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting b22ada1f0b6e462f86ff619b665c9214, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734125459734 2024-12-13T21:31:00,857 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a7563e785b14cff886ec3e8df0338c6, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734125459734 2024-12-13T21:31:00,868 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 
2024-12-13T21:31:00,870 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#B#compaction#138 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:00,871 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/f8705eb777ab47fd8f036986ee92a989 is 50, key is test_row_0/B:col10/1734125459738/Put/seqid=0 2024-12-13T21:31:00,872 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241213b81e8fea3c714df38b4306905bd34f53_e2eceadaa1cf76613a4f5d367b5ca446 store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:00,875 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241213b81e8fea3c714df38b4306905bd34f53_e2eceadaa1cf76613a4f5d367b5ca446, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:00,876 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213b81e8fea3c714df38b4306905bd34f53_e2eceadaa1cf76613a4f5d367b5ca446 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:00,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741987_1163 (size=4469) 2024-12-13T21:31:00,931 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#A#compaction#139 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:00,931 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/4d55c51716f64dd3bc1bcc29980dcc62 is 175, key is test_row_0/A:col10/1734125459738/Put/seqid=0 2024-12-13T21:31:00,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741986_1162 (size=12207) 2024-12-13T21:31:00,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741988_1164 (size=31161) 2024-12-13T21:31:00,957 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/4d55c51716f64dd3bc1bcc29980dcc62 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/4d55c51716f64dd3bc1bcc29980dcc62 2024-12-13T21:31:00,964 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/A of e2eceadaa1cf76613a4f5d367b5ca446 into 4d55c51716f64dd3bc1bcc29980dcc62(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:00,964 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:00,964 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/A, priority=13, startTime=1734125460853; duration=0sec 2024-12-13T21:31:00,965 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:00,965 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:A 2024-12-13T21:31:00,965 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:00,967 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:00,967 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/C is initiating minor compaction (all files) 2024-12-13T21:31:00,967 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/C in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:00,967 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/369ad4107d984985bd4ac7b7fc15895e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/4fdce5a31de144cabe6948ca5e0cf9e2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/3c683a97cfc847d9822308373696c51c] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=35.3 K 2024-12-13T21:31:00,968 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 369ad4107d984985bd4ac7b7fc15895e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1734125458576 2024-12-13T21:31:00,969 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fdce5a31de144cabe6948ca5e0cf9e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1734125458595 2024-12-13T21:31:00,969 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c683a97cfc847d9822308373696c51c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734125459734 2024-12-13T21:31:00,987 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#C#compaction#140 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:00,988 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/ea75597592d442fbacf850c34858103b is 50, key is test_row_0/C:col10/1734125459738/Put/seqid=0 2024-12-13T21:31:01,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:01,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-13T21:31:01,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:01,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:01,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:01,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:01,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:01,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:01,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125521020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125521019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125521021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125521022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741989_1165 (size=12207) 2024-12-13T21:31:01,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213b0e7598186c146389b76ea9583fb66cf_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125460385/Put/seqid=0 2024-12-13T21:31:01,058 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/ea75597592d442fbacf850c34858103b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/ea75597592d442fbacf850c34858103b 2024-12-13T21:31:01,066 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/C of e2eceadaa1cf76613a4f5d367b5ca446 into ea75597592d442fbacf850c34858103b(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:01,066 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:01,066 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/C, priority=13, startTime=1734125460853; duration=0sec 2024-12-13T21:31:01,067 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:01,067 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:C 2024-12-13T21:31:01,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741990_1166 (size=17034) 2024-12-13T21:31:01,085 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:01,090 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213b0e7598186c146389b76ea9583fb66cf_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b0e7598186c146389b76ea9583fb66cf_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:01,092 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/6eb97e81da9c4c6caa71da5bf8f58001, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:01,092 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/6eb97e81da9c4c6caa71da5bf8f58001 is 175, key is test_row_0/A:col10/1734125460385/Put/seqid=0 2024-12-13T21:31:01,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741991_1167 (size=48139) 2024-12-13T21:31:01,122 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=123, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/6eb97e81da9c4c6caa71da5bf8f58001 2024-12-13T21:31:01,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125521127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125521127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125521127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125521128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/4010538c82e844b6a96c5096c5958354 is 50, key is test_row_0/B:col10/1734125460385/Put/seqid=0 2024-12-13T21:31:01,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741992_1168 (size=12001) 2024-12-13T21:31:01,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125521330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125521331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125521331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125521332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,344 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/f8705eb777ab47fd8f036986ee92a989 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/f8705eb777ab47fd8f036986ee92a989 2024-12-13T21:31:01,354 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/B of e2eceadaa1cf76613a4f5d367b5ca446 into f8705eb777ab47fd8f036986ee92a989(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:01,354 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:01,354 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/B, priority=13, startTime=1734125460853; duration=0sec 2024-12-13T21:31:01,354 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:01,354 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:B 2024-12-13T21:31:01,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-13T21:31:01,357 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-13T21:31:01,359 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:01,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-13T21:31:01,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-13T21:31:01,360 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:01,361 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:01,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:01,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-13T21:31:01,513 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-13T21:31:01,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:01,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:01,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:01,514 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:01,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:01,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:01,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/4010538c82e844b6a96c5096c5958354 2024-12-13T21:31:01,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/f7ef92097dcd46c7bc63b660403ca1ef is 50, key is test_row_0/C:col10/1734125460385/Put/seqid=0 2024-12-13T21:31:01,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741993_1169 (size=12001) 2024-12-13T21:31:01,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125521635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125521636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125521636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:01,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125521642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,666 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-13T21:31:01,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:01,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:01,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:01,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:01,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:01,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-13T21:31:01,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:01,825 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-13T21:31:01,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:01,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:01,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:01,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:01,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:01,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:01,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-13T21:31:01,982 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:01,983 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-13T21:31:01,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:01,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:01,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:01,983 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:01,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:01,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:01,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/f7ef92097dcd46c7bc63b660403ca1ef 2024-12-13T21:31:02,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/6eb97e81da9c4c6caa71da5bf8f58001 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/6eb97e81da9c4c6caa71da5bf8f58001 2024-12-13T21:31:02,084 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/6eb97e81da9c4c6caa71da5bf8f58001, entries=250, sequenceid=123, filesize=47.0 K 2024-12-13T21:31:02,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/4010538c82e844b6a96c5096c5958354 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/4010538c82e844b6a96c5096c5958354 2024-12-13T21:31:02,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/4010538c82e844b6a96c5096c5958354, entries=150, sequenceid=123, filesize=11.7 K 2024-12-13T21:31:02,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/f7ef92097dcd46c7bc63b660403ca1ef as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/f7ef92097dcd46c7bc63b660403ca1ef 2024-12-13T21:31:02,136 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:02,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-13T21:31:02,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:02,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
as already flushing 2024-12-13T21:31:02,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:02,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:02,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:02,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:02,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:02,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125522142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:02,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:02,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125522144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:02,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:02,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125522156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:02,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:02,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125522159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:02,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/f7ef92097dcd46c7bc63b660403ca1ef, entries=150, sequenceid=123, filesize=11.7 K 2024-12-13T21:31:02,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for e2eceadaa1cf76613a4f5d367b5ca446 in 1190ms, sequenceid=123, compaction requested=false 2024-12-13T21:31:02,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:02,293 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:02,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-13T21:31:02,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:02,294 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-13T21:31:02,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:02,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:02,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:02,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:02,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:02,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:02,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412132dea0deb58514a7e8c213d9831b3ac95_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125461018/Put/seqid=0 2024-12-13T21:31:02,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741994_1170 (size=12254) 2024-12-13T21:31:02,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-13T21:31:02,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:02,717 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412132dea0deb58514a7e8c213d9831b3ac95_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412132dea0deb58514a7e8c213d9831b3ac95_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:02,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/26678b0a762a4a339182050018bab8b9, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:02,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/26678b0a762a4a339182050018bab8b9 is 175, key is test_row_0/A:col10/1734125461018/Put/seqid=0 2024-12-13T21:31:02,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741995_1171 (size=31055) 2024-12-13T21:31:02,723 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/26678b0a762a4a339182050018bab8b9 2024-12-13T21:31:02,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/3c6069a4fb534fe69e8a1fdc906f382a is 50, key is test_row_0/B:col10/1734125461018/Put/seqid=0 2024-12-13T21:31:02,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741996_1172 (size=12101) 2024-12-13T21:31:02,737 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/3c6069a4fb534fe69e8a1fdc906f382a 2024-12-13T21:31:02,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/6b56cd3a22754246b15e16c7c9bf0309 is 50, key is test_row_0/C:col10/1734125461018/Put/seqid=0 2024-12-13T21:31:02,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741997_1173 (size=12101) 2024-12-13T21:31:02,753 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/6b56cd3a22754246b15e16c7c9bf0309 2024-12-13T21:31:02,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
as already flushing 2024-12-13T21:31:02,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:02,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/26678b0a762a4a339182050018bab8b9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/26678b0a762a4a339182050018bab8b9 2024-12-13T21:31:02,802 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/26678b0a762a4a339182050018bab8b9, entries=150, sequenceid=135, filesize=30.3 K 2024-12-13T21:31:02,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/3c6069a4fb534fe69e8a1fdc906f382a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/3c6069a4fb534fe69e8a1fdc906f382a 2024-12-13T21:31:02,820 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/3c6069a4fb534fe69e8a1fdc906f382a, entries=150, sequenceid=135, filesize=11.8 K 2024-12-13T21:31:02,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/6b56cd3a22754246b15e16c7c9bf0309 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/6b56cd3a22754246b15e16c7c9bf0309 2024-12-13T21:31:02,832 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/6b56cd3a22754246b15e16c7c9bf0309, entries=150, sequenceid=135, filesize=11.8 K 2024-12-13T21:31:02,833 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=80.51 KB/82440 for e2eceadaa1cf76613a4f5d367b5ca446 in 538ms, sequenceid=135, compaction requested=true 2024-12-13T21:31:02,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 
2024-12-13T21:31:02,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:02,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-13T21:31:02,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-13T21:31:02,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-13T21:31:02,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4750 sec 2024-12-13T21:31:02,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:02,841 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-13T21:31:02,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:02,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:02,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:02,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:02,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:02,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:02,844 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.4800 sec 2024-12-13T21:31:02,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121307d42bf8144f46b29c01f16722da9787_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125462821/Put/seqid=0 2024-12-13T21:31:02,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741998_1174 (size=12304) 2024-12-13T21:31:02,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:02,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125522915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:03,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:03,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125523018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:03,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:03,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125523154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:03,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:03,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125523160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:03,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:03,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125523164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:03,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:03,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125523164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:03,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:03,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125523222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:03,284 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:03,288 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121307d42bf8144f46b29c01f16722da9787_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121307d42bf8144f46b29c01f16722da9787_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:03,289 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/713f3c8af006436bbe35dea4c573db60, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:03,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/713f3c8af006436bbe35dea4c573db60 is 175, key is test_row_0/A:col10/1734125462821/Put/seqid=0 2024-12-13T21:31:03,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741999_1175 (size=31105) 2024-12-13T21:31:03,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-13T21:31:03,485 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-13T21:31:03,486 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:03,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-13T21:31:03,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-13T21:31:03,487 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:03,488 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:03,488 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:03,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:03,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125523526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:03,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-13T21:31:03,639 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:03,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-13T21:31:03,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:03,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:03,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:03,640 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:03,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:03,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:03,693 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=151, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/713f3c8af006436bbe35dea4c573db60 2024-12-13T21:31:03,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/cfa2998590fe4fb291263658341459ce is 50, key is test_row_0/B:col10/1734125462821/Put/seqid=0 2024-12-13T21:31:03,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742000_1176 (size=12151) 2024-12-13T21:31:03,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-13T21:31:03,792 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:03,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-13T21:31:03,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:03,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:03,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:03,792 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:03,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:03,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:03,945 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:03,946 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-13T21:31:03,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:03,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:03,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:03,946 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:03,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:03,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:04,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:04,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125524029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:04,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-13T21:31:04,098 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:04,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-13T21:31:04,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:04,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:04,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:04,099 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:04,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:04,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:04,109 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/cfa2998590fe4fb291263658341459ce 2024-12-13T21:31:04,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/17fd9a0129f84ef3a88133295f60d7fc is 50, key is test_row_0/C:col10/1734125462821/Put/seqid=0 2024-12-13T21:31:04,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742001_1177 (size=12151) 2024-12-13T21:31:04,250 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:04,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-13T21:31:04,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:04,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:04,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:04,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:04,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:04,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:04,403 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:04,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-13T21:31:04,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:04,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:04,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:04,404 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:04,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:04,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
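The cycle above repeats while the region's own flush (triggered by MemStoreFlusher) is still running: the master re-dispatches the flush procedure for pid=52, FlushRegionCallable finds the region already flushing, fails with IOException, and the master records the RemoteProcedureException and tries again. A minimal, self-contained sketch of that guard-and-retry shape, using hypothetical toy classes rather than the real HBase ones, is:

```java
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical stand-in for a region that can only run one flush at a time.
class ToyRegion {
    private final AtomicBoolean flushing = new AtomicBoolean(false);

    /** Returns true if this call performed the flush, false if one was already running. */
    boolean flushIfIdle() throws InterruptedException {
        if (!flushing.compareAndSet(false, true)) {
            return false; // "NOT flushing ... as already flushing"
        }
        try {
            TimeUnit.MILLISECONDS.sleep(50); // pretend to write store files
            return true;
        } finally {
            flushing.set(false);
        }
    }
}

// Hypothetical remote-procedure callable: it must either flush or fail, so the
// dispatcher knows to retry later -- mirroring the repeated pid=52 failures above.
public class ToyFlushCallable {
    public static void main(String[] args) throws Exception {
        ToyRegion region = new ToyRegion();
        Thread background = new Thread(() -> {
            try { region.flushIfIdle(); } catch (InterruptedException ignored) { }
        });
        background.start(); // a flush started elsewhere, e.g. by the memstore flusher

        for (int attempt = 1; attempt <= 5; attempt++) {
            try {
                if (!region.flushIfIdle()) {
                    throw new IOException("Unable to complete flush: already flushing");
                }
                System.out.println("flush completed on attempt " + attempt);
                break;
            } catch (IOException e) {
                System.out.println("attempt " + attempt + " failed: " + e.getMessage());
                TimeUnit.MILLISECONDS.sleep(20); // dispatcher backs off and retries
            }
        }
        background.join();
    }
}
```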
2024-12-13T21:31:04,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/17fd9a0129f84ef3a88133295f60d7fc 2024-12-13T21:31:04,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/713f3c8af006436bbe35dea4c573db60 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/713f3c8af006436bbe35dea4c573db60 2024-12-13T21:31:04,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/713f3c8af006436bbe35dea4c573db60, entries=150, sequenceid=151, filesize=30.4 K 2024-12-13T21:31:04,556 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:04,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/cfa2998590fe4fb291263658341459ce as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/cfa2998590fe4fb291263658341459ce 2024-12-13T21:31:04,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-13T21:31:04,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:04,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:04,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:04,557 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:04,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:04,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:04,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/cfa2998590fe4fb291263658341459ce, entries=150, sequenceid=151, filesize=11.9 K 2024-12-13T21:31:04,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/17fd9a0129f84ef3a88133295f60d7fc as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/17fd9a0129f84ef3a88133295f60d7fc 2024-12-13T21:31:04,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/17fd9a0129f84ef3a88133295f60d7fc, entries=150, sequenceid=151, filesize=11.9 K 2024-12-13T21:31:04,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for e2eceadaa1cf76613a4f5d367b5ca446 in 1749ms, sequenceid=151, compaction requested=true 2024-12-13T21:31:04,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:04,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:31:04,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:04,591 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:04,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:04,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:04,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:04,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-13T21:31:04,593 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:04,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-13T21:31:04,597 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141460 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:04,598 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/A is initiating minor compaction (all files) 2024-12-13T21:31:04,598 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/A in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
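The selection lines above ("Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking" and the ExploringCompactionPolicy message) reflect a ratio-based search over contiguous runs of store files. The sketch below only illustrates that idea with hypothetical names and a simplified rule (every file in a window must be no larger than ratio times the rest of the window); it is not the actual ExploringCompactionPolicy:

```java
import java.util.ArrayList;
import java.util.List;

// Illustrative ratio-based selection over contiguous windows of store-file sizes.
public class ToyCompactionSelector {

    /** Returns the cheapest contiguous window of at least minFiles files, or an empty list. */
    static List<Long> select(List<Long> fileSizes, int minFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestSize = Long.MAX_VALUE;
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + minFiles; end <= fileSizes.size(); end++) {
                List<Long> window = fileSizes.subList(start, end);
                if (!inRatio(window, ratio)) {
                    continue; // a single oversized file dominates this window; skip it
                }
                long total = window.stream().mapToLong(Long::longValue).sum();
                // Prefer the cheapest qualifying window (less I/O per compaction).
                if (total < bestSize) {
                    bestSize = total;
                    best = new ArrayList<>(window);
                }
            }
        }
        return best;
    }

    /** Every file must be <= ratio * (sum of the other files in the window). */
    static boolean inRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes roughly shaped like the B-store files above (bytes).
        List<Long> sizes = List.of(12_151L, 11_900L, 12_100L, 12_300L);
        System.out.println("selected: " + select(sizes, 2, 1.2));
    }
}
```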
2024-12-13T21:31:04,598 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48460 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:04,598 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/4d55c51716f64dd3bc1bcc29980dcc62, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/6eb97e81da9c4c6caa71da5bf8f58001, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/26678b0a762a4a339182050018bab8b9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/713f3c8af006436bbe35dea4c573db60] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=138.1 K 2024-12-13T21:31:04,598 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/B is initiating minor compaction (all files) 2024-12-13T21:31:04,598 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:04,598 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/B in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:04,598 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/4d55c51716f64dd3bc1bcc29980dcc62, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/6eb97e81da9c4c6caa71da5bf8f58001, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/26678b0a762a4a339182050018bab8b9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/713f3c8af006436bbe35dea4c573db60] 2024-12-13T21:31:04,598 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/f8705eb777ab47fd8f036986ee92a989, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/4010538c82e844b6a96c5096c5958354, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/3c6069a4fb534fe69e8a1fdc906f382a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/cfa2998590fe4fb291263658341459ce] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=47.3 K 2024-12-13T21:31:04,599 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d55c51716f64dd3bc1bcc29980dcc62, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734125459734 2024-12-13T21:31:04,599 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting f8705eb777ab47fd8f036986ee92a989, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734125459734 2024-12-13T21:31:04,600 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6eb97e81da9c4c6caa71da5bf8f58001, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734125460377 2024-12-13T21:31:04,600 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 4010538c82e844b6a96c5096c5958354, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734125460385 2024-12-13T21:31:04,600 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26678b0a762a4a339182050018bab8b9, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734125461004 2024-12-13T21:31:04,601 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c6069a4fb534fe69e8a1fdc906f382a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734125461004 2024-12-13T21:31:04,601 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 713f3c8af006436bbe35dea4c573db60, keycount=150, 
bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1734125462821 2024-12-13T21:31:04,603 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting cfa2998590fe4fb291263658341459ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1734125462821 2024-12-13T21:31:04,622 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:04,625 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241213f78957e67f274a0999315fa8e40191d8_e2eceadaa1cf76613a4f5d367b5ca446 store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:04,626 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#B#compaction#151 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:04,626 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/ca64a00e5de949578cd3340af2ec1eb2 is 50, key is test_row_0/B:col10/1734125462821/Put/seqid=0 2024-12-13T21:31:04,631 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241213f78957e67f274a0999315fa8e40191d8_e2eceadaa1cf76613a4f5d367b5ca446, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:04,631 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213f78957e67f274a0999315fa8e40191d8_e2eceadaa1cf76613a4f5d367b5ca446 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:04,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742002_1178 (size=12493) 2024-12-13T21:31:04,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742003_1179 (size=4469) 2024-12-13T21:31:04,656 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#A#compaction#150 average throughput is 0.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:04,657 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/082bb04e489d4ceb91a6b3db9159e852 is 175, key is test_row_0/A:col10/1734125462821/Put/seqid=0 2024-12-13T21:31:04,662 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/ca64a00e5de949578cd3340af2ec1eb2 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/ca64a00e5de949578cd3340af2ec1eb2 2024-12-13T21:31:04,669 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/B of e2eceadaa1cf76613a4f5d367b5ca446 into ca64a00e5de949578cd3340af2ec1eb2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:04,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742004_1180 (size=31447) 2024-12-13T21:31:04,669 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:04,669 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/B, priority=12, startTime=1734125464591; duration=0sec 2024-12-13T21:31:04,669 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:04,669 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:B 2024-12-13T21:31:04,669 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:04,672 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48460 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:04,672 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/C is initiating minor compaction (all files) 2024-12-13T21:31:04,673 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/C in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
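The PressureAwareThroughputController entries ("average throughput is 6.55 MB/second ... total limit is 50.00 MB/second") describe write throttling during compaction: the compactor meters what it writes and sleeps whenever it gets ahead of the configured rate. A rough stand-alone sketch of such a limiter (hypothetical class, not the HBase implementation):

```java
import java.util.concurrent.TimeUnit;

// Rough rate limiter: sleep whenever the observed write rate exceeds the cap.
public class ToyThroughputLimiter {
    private final double maxBytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesWritten = 0;

    ToyThroughputLimiter(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    /** Record a chunk of written bytes and sleep if we are ahead of the allowed rate. */
    void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        double minimumSeconds = bytesWritten / maxBytesPerSecond;
        if (minimumSeconds > elapsedSeconds) {
            long sleepMillis = (long) ((minimumSeconds - elapsedSeconds) * 1000);
            TimeUnit.MILLISECONDS.sleep(sleepMillis);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // Cap at 50 MB/s, mirroring the "total limit is 50.00 MB/second" entries above.
        ToyThroughputLimiter limiter = new ToyThroughputLimiter(50.0 * 1024 * 1024);
        long written = 0;
        for (int i = 0; i < 100; i++) {
            written += 1024 * 1024;      // pretend we wrote 1 MB of compacted data
            limiter.control(1024 * 1024);
        }
        double seconds = (System.nanoTime() - limiter.startNanos) / 1e9;
        System.out.printf("wrote %d MB in %.2f s (%.2f MB/s)%n",
                written / (1024 * 1024), seconds, written / (1024.0 * 1024) / seconds);
    }
}
```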
2024-12-13T21:31:04,673 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/ea75597592d442fbacf850c34858103b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/f7ef92097dcd46c7bc63b660403ca1ef, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/6b56cd3a22754246b15e16c7c9bf0309, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/17fd9a0129f84ef3a88133295f60d7fc] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=47.3 K 2024-12-13T21:31:04,675 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting ea75597592d442fbacf850c34858103b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734125459734 2024-12-13T21:31:04,676 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting f7ef92097dcd46c7bc63b660403ca1ef, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734125460385 2024-12-13T21:31:04,677 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b56cd3a22754246b15e16c7c9bf0309, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734125461004 2024-12-13T21:31:04,677 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 17fd9a0129f84ef3a88133295f60d7fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1734125462821 2024-12-13T21:31:04,680 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/082bb04e489d4ceb91a6b3db9159e852 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/082bb04e489d4ceb91a6b3db9159e852 2024-12-13T21:31:04,689 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/A of e2eceadaa1cf76613a4f5d367b5ca446 into 082bb04e489d4ceb91a6b3db9159e852(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
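The "Committing .../.tmp/A/082bb04e... as .../A/082bb04e..." steps show the write-to-temporary-then-rename pattern: new HFiles are produced under the region's .tmp directory and only moved into the store directory once complete, so readers never observe a partial file. A small local-filesystem sketch of the same pattern (java.nio, made-up paths):

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Write the new file under .tmp, then atomically move it into the live directory.
public class ToyCommitFile {
    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("toy-store");     // stands in for .../A
        Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

        Path tmpFile = tmpDir.resolve("082bb04e.demo");
        Files.write(tmpFile, "compacted contents".getBytes(StandardCharsets.UTF_8));

        // Readers only ever see the file once this rename has happened.
        Path committed = storeDir.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

        System.out.println("committed " + committed);
    }
}
```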
2024-12-13T21:31:04,689 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:04,689 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/A, priority=12, startTime=1734125464590; duration=0sec 2024-12-13T21:31:04,689 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:04,689 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:A 2024-12-13T21:31:04,698 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#C#compaction#152 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:04,698 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/fda8704ad48b4395b52fc34fb32baa56 is 50, key is test_row_0/C:col10/1734125462821/Put/seqid=0 2024-12-13T21:31:04,710 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:04,711 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-13T21:31:04,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:04,711 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-13T21:31:04,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:04,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:04,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:04,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:04,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:04,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:04,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742005_1181 (size=12493) 2024-12-13T21:31:04,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213ae3be48e531e405b8a36b5a13b3d7785_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125462904/Put/seqid=0 2024-12-13T21:31:04,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742006_1182 (size=12304) 2024-12-13T21:31:05,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:05,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:05,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:05,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125525067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:05,124 INFO [master/fd052dae32be:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-13T21:31:05,124 INFO [master/fd052dae32be:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-13T21:31:05,129 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/fda8704ad48b4395b52fc34fb32baa56 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fda8704ad48b4395b52fc34fb32baa56 2024-12-13T21:31:05,138 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/C of e2eceadaa1cf76613a4f5d367b5ca446 into fda8704ad48b4395b52fc34fb32baa56(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
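The RegionTooBusyException warnings ("Over memstore limit=512.0 K ...") come from a back-pressure check made before each write: once the region's in-memory data exceeds its blocking limit while flushes are still catching up, new puts are rejected and the client has to retry. A simplified version of that check, with hypothetical names rather than HRegion.checkResources:

```java
import java.util.concurrent.atomic.AtomicLong;

// Simplified back-pressure check: reject writes while the memstore is over its blocking limit.
public class ToyMemstoreLimit {

    static class RegionTooBusy extends RuntimeException {
        RegionTooBusy(String msg) { super(msg); }
    }

    private final long blockingLimitBytes;
    private final AtomicLong memstoreBytes = new AtomicLong();

    ToyMemstoreLimit(long blockingLimitBytes) {
        this.blockingLimitBytes = blockingLimitBytes;
    }

    void put(byte[] value) {
        if (memstoreBytes.get() > blockingLimitBytes) {
            // The caller is expected to back off and retry, as the RpcRetryingCallerImpl
            // entries in this log do.
            throw new RegionTooBusy("Over memstore limit=" + blockingLimitBytes + " bytes");
        }
        memstoreBytes.addAndGet(value.length);
    }

    /** Called by the flusher once data has been written out to store files. */
    void flushed(long bytes) {
        memstoreBytes.addAndGet(-bytes);
    }

    public static void main(String[] args) {
        ToyMemstoreLimit region = new ToyMemstoreLimit(512 * 1024); // 512 K, as in the log
        byte[] cell = new byte[4 * 1024];
        try {
            for (int i = 0; i < 200; i++) {
                region.put(cell); // 200 * 4 K = 800 K, so this trips the limit
            }
        } catch (RegionTooBusy e) {
            System.out.println("write rejected: " + e.getMessage());
        }
    }
}
```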
2024-12-13T21:31:05,138 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:05,138 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/C, priority=12, startTime=1734125464591; duration=0sec 2024-12-13T21:31:05,138 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:05,138 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:C 2024-12-13T21:31:05,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:05,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:05,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125525163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:05,165 DEBUG [Thread-675 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:31:05,167 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213ae3be48e531e405b8a36b5a13b3d7785_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213ae3be48e531e405b8a36b5a13b3d7785_e2eceadaa1cf76613a4f5d367b5ca446 
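On the client side, the "Call exception, tries=6, retries=16, started=4145 ms ago" entries show RpcRetryingCallerImpl retrying the rejected put with a growing pause instead of failing immediately. The sketch below captures that shape with a generic retry loop and capped exponential backoff; the pause schedule and names are illustrative, not HBase's actual ones:

```java
import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

// Generic retry loop with capped exponential backoff, in the spirit of the
// client-side retries visible in the log (schedule and names are illustrative).
public class ToyRetryingCaller {

    static <T> T callWithRetries(Callable<T> call, int maxAttempts, long basePauseMillis)
            throws Exception {
        Exception last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return call.call();
            } catch (IOException e) {
                last = e;
                long pause = Math.min(basePauseMillis << (attempt - 1), 10_000L);
                System.out.printf("tries=%d, retries=%d, msg=%s, sleeping %d ms%n",
                        attempt, maxAttempts, e.getMessage(), pause);
                TimeUnit.MILLISECONDS.sleep(pause);
            }
        }
        throw last;
    }

    public static void main(String[] args) throws Exception {
        // Fails a few times (standing in for RegionTooBusyException), then succeeds.
        int[] remainingFailures = {3};
        String result = callWithRetries(() -> {
            if (remainingFailures[0]-- > 0) {
                throw new IOException("region too busy");
            }
            return "put succeeded";
        }, 16, 100);
        System.out.println(result);
    }
}
```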
2024-12-13T21:31:05,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/b55a59b895524594a42c6a68344f1852, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:05,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/b55a59b895524594a42c6a68344f1852 is 175, key is test_row_0/A:col10/1734125462904/Put/seqid=0 2024-12-13T21:31:05,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:05,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125525169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:05,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742007_1183 (size=31105) 2024-12-13T21:31:05,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:05,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125525178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:05,182 DEBUG [Thread-677 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:31:05,183 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=38.0 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/b55a59b895524594a42c6a68344f1852 2024-12-13T21:31:05,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:05,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125525183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:05,190 DEBUG [Thread-671 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:31:05,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:05,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125525183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:05,194 DEBUG [Thread-679 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4172 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:31:05,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/e852c8ce008d44a3a5b7872bd49d3073 is 50, key is test_row_0/B:col10/1734125462904/Put/seqid=0 2024-12-13T21:31:05,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742008_1184 (size=12151) 2024-12-13T21:31:05,211 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/e852c8ce008d44a3a5b7872bd49d3073 2024-12-13T21:31:05,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/0414003848ad44b68dab741039c3aa1b is 50, key is test_row_0/C:col10/1734125462904/Put/seqid=0 2024-12-13T21:31:05,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742009_1185 (size=12151) 2024-12-13T21:31:05,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:05,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125525373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:05,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-13T21:31:05,654 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/0414003848ad44b68dab741039c3aa1b 2024-12-13T21:31:05,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/b55a59b895524594a42c6a68344f1852 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/b55a59b895524594a42c6a68344f1852 2024-12-13T21:31:05,668 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/b55a59b895524594a42c6a68344f1852, entries=150, sequenceid=173, filesize=30.4 K 2024-12-13T21:31:05,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/e852c8ce008d44a3a5b7872bd49d3073 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/e852c8ce008d44a3a5b7872bd49d3073 2024-12-13T21:31:05,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:05,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125525676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:05,680 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/e852c8ce008d44a3a5b7872bd49d3073, entries=150, sequenceid=173, filesize=11.9 K 2024-12-13T21:31:05,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/0414003848ad44b68dab741039c3aa1b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/0414003848ad44b68dab741039c3aa1b 2024-12-13T21:31:05,687 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/0414003848ad44b68dab741039c3aa1b, entries=150, sequenceid=173, filesize=11.9 K 2024-12-13T21:31:05,688 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for e2eceadaa1cf76613a4f5d367b5ca446 in 977ms, sequenceid=173, compaction requested=false 2024-12-13T21:31:05,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:05,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:05,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-13T21:31:05,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-13T21:31:05,695 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-13T21:31:05,695 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2020 sec 2024-12-13T21:31:05,697 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 2.2100 sec 2024-12-13T21:31:06,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:06,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-13T21:31:06,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:06,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:06,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:06,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:06,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:06,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:06,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412131934b79d87ad4e2b88363ff6bace0d91_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125466181/Put/seqid=0 2024-12-13T21:31:06,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742010_1186 (size=14794) 2024-12-13T21:31:06,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:06,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125526229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:06,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:06,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125526330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:06,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:06,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125526532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:06,596 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:06,602 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412131934b79d87ad4e2b88363ff6bace0d91_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412131934b79d87ad4e2b88363ff6bace0d91_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:06,603 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/14815879ad3246919a8334490f2d14b3, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:06,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/14815879ad3246919a8334490f2d14b3 is 175, key is test_row_0/A:col10/1734125466181/Put/seqid=0 2024-12-13T21:31:06,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742011_1187 (size=39749) 2024-12-13T21:31:06,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:06,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125526837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:07,015 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=191, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/14815879ad3246919a8334490f2d14b3 2024-12-13T21:31:07,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/544666aea4534882a61e0e9c69e4453a is 50, key is test_row_0/B:col10/1734125466181/Put/seqid=0 2024-12-13T21:31:07,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742012_1188 (size=12151) 2024-12-13T21:31:07,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:07,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125527342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:07,458 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/544666aea4534882a61e0e9c69e4453a 2024-12-13T21:31:07,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/fd632df9169142e498a34666497f2a44 is 50, key is test_row_0/C:col10/1734125466181/Put/seqid=0 2024-12-13T21:31:07,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742013_1189 (size=12151) 2024-12-13T21:31:07,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/fd632df9169142e498a34666497f2a44 2024-12-13T21:31:07,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/14815879ad3246919a8334490f2d14b3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/14815879ad3246919a8334490f2d14b3 2024-12-13T21:31:07,505 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/14815879ad3246919a8334490f2d14b3, entries=200, sequenceid=191, filesize=38.8 K 2024-12-13T21:31:07,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/544666aea4534882a61e0e9c69e4453a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/544666aea4534882a61e0e9c69e4453a 2024-12-13T21:31:07,513 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/544666aea4534882a61e0e9c69e4453a, entries=150, sequenceid=191, filesize=11.9 K 2024-12-13T21:31:07,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/fd632df9169142e498a34666497f2a44 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fd632df9169142e498a34666497f2a44 2024-12-13T21:31:07,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fd632df9169142e498a34666497f2a44, entries=150, sequenceid=191, filesize=11.9 K 2024-12-13T21:31:07,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for e2eceadaa1cf76613a4f5d367b5ca446 in 1338ms, sequenceid=191, compaction requested=true 2024-12-13T21:31:07,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:07,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:31:07,521 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:07,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:07,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:07,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:07,521 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:07,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:07,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:07,524 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:07,524 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102301 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:07,524 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/B is initiating minor compaction (all files) 2024-12-13T21:31:07,524 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/A is initiating minor compaction (all files) 2024-12-13T21:31:07,524 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/A in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:07,524 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/B in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:07,524 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/082bb04e489d4ceb91a6b3db9159e852, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/b55a59b895524594a42c6a68344f1852, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/14815879ad3246919a8334490f2d14b3] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=99.9 K 2024-12-13T21:31:07,524 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/ca64a00e5de949578cd3340af2ec1eb2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/e852c8ce008d44a3a5b7872bd49d3073, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/544666aea4534882a61e0e9c69e4453a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=35.9 K 2024-12-13T21:31:07,524 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:07,525 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/082bb04e489d4ceb91a6b3db9159e852, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/b55a59b895524594a42c6a68344f1852, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/14815879ad3246919a8334490f2d14b3] 2024-12-13T21:31:07,525 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting ca64a00e5de949578cd3340af2ec1eb2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1734125462821 2024-12-13T21:31:07,525 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 082bb04e489d4ceb91a6b3db9159e852, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1734125462821 2024-12-13T21:31:07,525 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting e852c8ce008d44a3a5b7872bd49d3073, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1734125462897 2024-12-13T21:31:07,525 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting b55a59b895524594a42c6a68344f1852, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1734125462897 2024-12-13T21:31:07,526 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14815879ad3246919a8334490f2d14b3, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1734125465039 2024-12-13T21:31:07,526 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 544666aea4534882a61e0e9c69e4453a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1734125465039 2024-12-13T21:31:07,537 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:07,538 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#B#compaction#159 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:07,539 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/c4033c6021904c3f99f6fb5b4a26a445 is 50, key is test_row_0/B:col10/1734125466181/Put/seqid=0 2024-12-13T21:31:07,554 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412137a47cdb344494c649c8c0ce655925415_e2eceadaa1cf76613a4f5d367b5ca446 store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:07,557 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412137a47cdb344494c649c8c0ce655925415_e2eceadaa1cf76613a4f5d367b5ca446, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:07,558 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412137a47cdb344494c649c8c0ce655925415_e2eceadaa1cf76613a4f5d367b5ca446 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:07,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742014_1190 (size=12595) 2024-12-13T21:31:07,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742015_1191 (size=4469) 2024-12-13T21:31:07,578 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#A#compaction#160 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:07,578 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/17d3793f2a7a4fb9bdc36467af46e7a6 is 175, key is test_row_0/A:col10/1734125466181/Put/seqid=0 2024-12-13T21:31:07,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742016_1192 (size=31549) 2024-12-13T21:31:07,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-13T21:31:07,597 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-13T21:31:07,598 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:07,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-13T21:31:07,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-13T21:31:07,600 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:07,601 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:07,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:07,605 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/17d3793f2a7a4fb9bdc36467af46e7a6 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/17d3793f2a7a4fb9bdc36467af46e7a6 2024-12-13T21:31:07,610 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/A of e2eceadaa1cf76613a4f5d367b5ca446 into 17d3793f2a7a4fb9bdc36467af46e7a6(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:07,610 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:07,611 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/A, priority=13, startTime=1734125467520; duration=0sec 2024-12-13T21:31:07,611 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:07,611 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:A 2024-12-13T21:31:07,611 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:07,612 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:07,613 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/C is initiating minor compaction (all files) 2024-12-13T21:31:07,613 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/C in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:07,613 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fda8704ad48b4395b52fc34fb32baa56, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/0414003848ad44b68dab741039c3aa1b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fd632df9169142e498a34666497f2a44] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=35.9 K 2024-12-13T21:31:07,613 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting fda8704ad48b4395b52fc34fb32baa56, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1734125462821 2024-12-13T21:31:07,613 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0414003848ad44b68dab741039c3aa1b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1734125462897 2024-12-13T21:31:07,613 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd632df9169142e498a34666497f2a44, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1734125465039 2024-12-13T21:31:07,622 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#C#compaction#161 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:07,623 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/c29e8072ebf241b282324e9478833521 is 50, key is test_row_0/C:col10/1734125466181/Put/seqid=0 2024-12-13T21:31:07,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742017_1193 (size=12595) 2024-12-13T21:31:07,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-13T21:31:07,752 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:07,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-13T21:31:07,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:07,753 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-13T21:31:07,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:07,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:07,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:07,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:07,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:07,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:07,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412133a756c91c72e4a0aa2a6e468e68eeafa_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is 
test_row_0/A:col10/1734125466218/Put/seqid=0 2024-12-13T21:31:07,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742018_1194 (size=12304) 2024-12-13T21:31:07,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:07,787 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412133a756c91c72e4a0aa2a6e468e68eeafa_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412133a756c91c72e4a0aa2a6e468e68eeafa_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:07,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/dd82675cf1904921bd1f4c88b3897dde, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:07,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/dd82675cf1904921bd1f4c88b3897dde is 175, key is test_row_0/A:col10/1734125466218/Put/seqid=0 2024-12-13T21:31:07,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742019_1195 (size=31105) 2024-12-13T21:31:07,807 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/dd82675cf1904921bd1f4c88b3897dde 2024-12-13T21:31:07,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/2a7075048c844ef491c3200b7be0f70e is 50, key is test_row_0/B:col10/1734125466218/Put/seqid=0 2024-12-13T21:31:07,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742020_1196 (size=12151) 2024-12-13T21:31:07,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-13T21:31:07,980 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/c4033c6021904c3f99f6fb5b4a26a445 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/c4033c6021904c3f99f6fb5b4a26a445 2024-12-13T21:31:07,988 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/B of e2eceadaa1cf76613a4f5d367b5ca446 into c4033c6021904c3f99f6fb5b4a26a445(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:07,988 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:07,988 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/B, priority=13, startTime=1734125467521; duration=0sec 2024-12-13T21:31:07,988 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:07,988 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:B 2024-12-13T21:31:08,038 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/c29e8072ebf241b282324e9478833521 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/c29e8072ebf241b282324e9478833521 2024-12-13T21:31:08,044 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/C of e2eceadaa1cf76613a4f5d367b5ca446 into c29e8072ebf241b282324e9478833521(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:08,044 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:08,044 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/C, priority=13, startTime=1734125467521; duration=0sec 2024-12-13T21:31:08,044 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:08,044 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:C 2024-12-13T21:31:08,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-13T21:31:08,230 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/2a7075048c844ef491c3200b7be0f70e 2024-12-13T21:31:08,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/83c08d72f1bd43c19be2b28e50da4334 is 50, key is test_row_0/C:col10/1734125466218/Put/seqid=0 2024-12-13T21:31:08,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742021_1197 (size=12151) 2024-12-13T21:31:08,240 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/83c08d72f1bd43c19be2b28e50da4334 2024-12-13T21:31:08,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/dd82675cf1904921bd1f4c88b3897dde as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd82675cf1904921bd1f4c88b3897dde 2024-12-13T21:31:08,252 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd82675cf1904921bd1f4c88b3897dde, entries=150, sequenceid=211, filesize=30.4 K 2024-12-13T21:31:08,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 
{event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/2a7075048c844ef491c3200b7be0f70e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/2a7075048c844ef491c3200b7be0f70e 2024-12-13T21:31:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,257 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/2a7075048c844ef491c3200b7be0f70e, entries=150, sequenceid=211, filesize=11.9 K 2024-12-13T21:31:08,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/83c08d72f1bd43c19be2b28e50da4334 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/83c08d72f1bd43c19be2b28e50da4334 2024-12-13T21:31:08,261 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/83c08d72f1bd43c19be2b28e50da4334, entries=150, sequenceid=211, filesize=11.9 K 2024-12-13T21:31:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,262 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=0 
B/0 for e2eceadaa1cf76613a4f5d367b5ca446 in 508ms, sequenceid=211, compaction requested=false 2024-12-13T21:31:08,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:08,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:08,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-13T21:31:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-13T21:31:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,265 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-13T21:31:08,265 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 662 msec 2024-12-13T21:31:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,266 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 667 msec 2024-12-13T21:31:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,269 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,271 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,275 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [... identical DEBUG entries from RpcServer.default.FPBQ.Fifo handlers 0-2, repeated between 2024-12-13T21:31:08,275 and 2024-12-13T21:31:08,366, omitted ...] 2024-12-13T21:31:08,366 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,370 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,374 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,378 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,385 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,390 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,399 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,403 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,407 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,411 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,416 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,419 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,422 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,425 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [... this identical DEBUG entry repeats continuously from 2024-12-13T21:31:08,425 through 2024-12-13T21:31:08,468 across RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2; the duplicate entries are elided here ...] 2024-12-13T21:31:08,469 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:08,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-13T21:31:08,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:08,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:08,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:08,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:08,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:08,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:08,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,475 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213f3f0255180474edb8334cd5b9024789f_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125468431/Put/seqid=0 2024-12-13T21:31:08,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742023_1199 (size=24758) 2024-12-13T21:31:08,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:08,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125528545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:08,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:08,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125528649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:08,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-13T21:31:08,703 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-13T21:31:08,708 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:08,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-13T21:31:08,710 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:08,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-13T21:31:08,710 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:08,710 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:08,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-13T21:31:08,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:08,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125528856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:08,862 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:08,862 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-13T21:31:08,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:08,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:08,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:08,863 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
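[Editor's note] The RegionTooBusyException warnings above are raised by HRegion.checkResources (HRegion.java:5067 in these traces) once the region's memstore passes its blocking limit, reported here as 512.0 K. In HBase that limit is the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier (typically 4), so the small 512 K figure suggests this test deliberately runs with a tiny flush size. That reading, and the sketch below, are simplified illustrations rather than the actual server code.

// Simplified model of the resource check behind the RegionTooBusyException
// warnings above. The 512 K figure mirrors the log; this is not the real
// HRegion.checkResources implementation, only an illustration of the rule
// "block writes once memstore size exceeds flush size * block multiplier".
final class MemStoreGuard {
    private final long blockingLimitBytes;

    MemStoreGuard(long flushSizeBytes, int blockMultiplier) {
        // e.g. a 128 KB flush size * 4 = 512 KB blocking limit, as seen above
        this.blockingLimitBytes = flushSizeBytes * blockMultiplier;
    }

    void checkResources(long memStoreSizeBytes, String regionName) throws java.io.IOException {
        if (memStoreSizeBytes > blockingLimitBytes) {
            // The real server throws org.apache.hadoop.hbase.RegionTooBusyException,
            // which the client treats as retryable and reattempts after a backoff.
            throw new java.io.IOException("Over memstore limit="
                + (blockingLimitBytes / 1024) + " K, regionName=" + regionName);
        }
    }
}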
2024-12-13T21:31:08,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:08,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:08,914 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:08,925 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213f3f0255180474edb8334cd5b9024789f_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213f3f0255180474edb8334cd5b9024789f_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:08,926 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/87db0466810e4ed5a7e2299b0a75ff18, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:08,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/87db0466810e4ed5a7e2299b0a75ff18 is 175, key is test_row_0/A:col10/1734125468431/Put/seqid=0 2024-12-13T21:31:08,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742022_1198 (size=74395) 2024-12-13T21:31:08,930 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=224, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/87db0466810e4ed5a7e2299b0a75ff18 2024-12-13T21:31:08,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/309bd8bd26cd43ceb08706f374e14a2a is 50, key is test_row_0/B:col10/1734125468431/Put/seqid=0 2024-12-13T21:31:08,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742024_1200 (size=12151) 2024-12-13T21:31:08,980 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/309bd8bd26cd43ceb08706f374e14a2a 2024-12-13T21:31:09,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-13T21:31:09,014 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,015 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-13T21:31:09,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:09,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:09,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:09,016 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:09,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
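[Editor's note] The pid=55/pid=56 activity above is the master-coordinated flush of TestAcidGuarantees requested at 21:31:08,708; the region server keeps replying "NOT flushing ... as already flushing" because MemStoreFlusher.0 is still writing out the previous snapshot, so the FlushRegionProcedure is reported back as failed and dispatched again until it can run. For orientation, a minimal sketch of how such a table flush is issued from a client using the standard HBase Admin API; the class and method names are the usual client entry points, not code taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Asks the master to flush every region of the table; on recent
            // HBase versions this is driven by a FlushTableProcedure like
            // pid=55 in the log above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}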
2024-12-13T21:31:09,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:09,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/d09ea29e250a4d6ca307172687ec9233 is 50, key is test_row_0/C:col10/1734125468431/Put/seqid=0
2024-12-13T21:31:09,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742025_1201 (size=12151)
2024-12-13T21:31:09,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/d09ea29e250a4d6ca307172687ec9233
2024-12-13T21:31:09,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/87db0466810e4ed5a7e2299b0a75ff18 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/87db0466810e4ed5a7e2299b0a75ff18
2024-12-13T21:31:09,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/87db0466810e4ed5a7e2299b0a75ff18, entries=400, sequenceid=224, filesize=72.7 K
2024-12-13T21:31:09,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/309bd8bd26cd43ceb08706f374e14a2a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/309bd8bd26cd43ceb08706f374e14a2a
2024-12-13T21:31:09,060 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/309bd8bd26cd43ceb08706f374e14a2a, entries=150, sequenceid=224, filesize=11.9 K
2024-12-13T21:31:09,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/d09ea29e250a4d6ca307172687ec9233 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/d09ea29e250a4d6ca307172687ec9233
2024-12-13T21:31:09,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/d09ea29e250a4d6ca307172687ec9233, entries=150, sequenceid=224, filesize=11.9 K
2024-12-13T21:31:09,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for e2eceadaa1cf76613a4f5d367b5ca446 in 601ms, sequenceid=224, compaction requested=true
2024-12-13T21:31:09,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446:
2024-12-13T21:31:09,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:A, priority=-2147483648, current under compaction store size is 1
2024-12-13T21:31:09,070 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T21:31:09,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T21:31:09,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:B, priority=-2147483648, current under compaction store size is 2
2024-12-13T21:31:09,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-13T21:31:09,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:C, priority=-2147483648, current under compaction store size is 3
2024-12-13T21:31:09,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0
2024-12-13T21:31:09,071 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T21:31:09,072 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 137049 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-13T21:31:09,072 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/A is initiating minor compaction (all files)
2024-12-13T21:31:09,072 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/A in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.
2024-12-13T21:31:09,072 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/17d3793f2a7a4fb9bdc36467af46e7a6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd82675cf1904921bd1f4c88b3897dde, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/87db0466810e4ed5a7e2299b0a75ff18] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=133.8 K 2024-12-13T21:31:09,072 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:09,072 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/17d3793f2a7a4fb9bdc36467af46e7a6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd82675cf1904921bd1f4c88b3897dde, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/87db0466810e4ed5a7e2299b0a75ff18] 2024-12-13T21:31:09,073 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17d3793f2a7a4fb9bdc36467af46e7a6, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1734125465039 2024-12-13T21:31:09,073 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd82675cf1904921bd1f4c88b3897dde, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734125466218 2024-12-13T21:31:09,073 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:09,073 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/B is initiating minor compaction (all files) 2024-12-13T21:31:09,073 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87db0466810e4ed5a7e2299b0a75ff18, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1734125468396 2024-12-13T21:31:09,073 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/B in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:09,073 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/c4033c6021904c3f99f6fb5b4a26a445, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/2a7075048c844ef491c3200b7be0f70e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/309bd8bd26cd43ceb08706f374e14a2a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=36.0 K 2024-12-13T21:31:09,074 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting c4033c6021904c3f99f6fb5b4a26a445, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1734125465039 2024-12-13T21:31:09,074 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a7075048c844ef491c3200b7be0f70e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734125466218 2024-12-13T21:31:09,074 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 309bd8bd26cd43ceb08706f374e14a2a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1734125468431 2024-12-13T21:31:09,088 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:09,094 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#B#compaction#169 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:09,094 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/700efd06071f44b7bf5a6abe47e7de56 is 50, key is test_row_0/B:col10/1734125468431/Put/seqid=0 2024-12-13T21:31:09,096 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412135990c9fa30a34279a9a4e5aa364c9530_e2eceadaa1cf76613a4f5d367b5ca446 store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:09,099 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412135990c9fa30a34279a9a4e5aa364c9530_e2eceadaa1cf76613a4f5d367b5ca446, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:09,099 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412135990c9fa30a34279a9a4e5aa364c9530_e2eceadaa1cf76613a4f5d367b5ca446 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:09,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742026_1202 (size=12697) 2024-12-13T21:31:09,140 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/700efd06071f44b7bf5a6abe47e7de56 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/700efd06071f44b7bf5a6abe47e7de56 2024-12-13T21:31:09,148 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/B of e2eceadaa1cf76613a4f5d367b5ca446 into 700efd06071f44b7bf5a6abe47e7de56(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:09,148 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:09,148 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/B, priority=13, startTime=1734125469070; duration=0sec 2024-12-13T21:31:09,148 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:09,148 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:B 2024-12-13T21:31:09,148 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:09,150 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:09,150 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/C is initiating minor compaction (all files) 2024-12-13T21:31:09,150 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/C in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:09,151 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/c29e8072ebf241b282324e9478833521, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/83c08d72f1bd43c19be2b28e50da4334, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/d09ea29e250a4d6ca307172687ec9233] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=36.0 K 2024-12-13T21:31:09,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742027_1203 (size=4469) 2024-12-13T21:31:09,153 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting c29e8072ebf241b282324e9478833521, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1734125465039 2024-12-13T21:31:09,154 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#A#compaction#168 average throughput is 0.37 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:09,154 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/8af207e3e838439387851f78a553298c is 175, key is test_row_0/A:col10/1734125468431/Put/seqid=0 2024-12-13T21:31:09,155 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 83c08d72f1bd43c19be2b28e50da4334, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734125466218 2024-12-13T21:31:09,156 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting d09ea29e250a4d6ca307172687ec9233, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1734125468431 2024-12-13T21:31:09,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:09,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-13T21:31:09,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:09,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:09,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:09,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:09,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:09,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:09,170 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#C#compaction#170 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:09,171 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/5a7bac31882f42889cca441ed7263730 is 50, key is test_row_0/C:col10/1734125468431/Put/seqid=0 2024-12-13T21:31:09,174 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,174 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-13T21:31:09,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:09,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:09,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:09,174 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:09,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:09,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:09,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:09,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125529189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742028_1204 (size=31651) 2024-12-13T21:31:09,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:09,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:09,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36248 deadline: 1734125529195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36200 deadline: 1734125529196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:09,199 DEBUG [Thread-675 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at 
org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:31:09,199 DEBUG [Thread-677 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8177 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:31:09,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36244 deadline: 1734125529196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,200 DEBUG [Thread-671 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8181 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:31:09,206 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213b9891c312dd14b149bbe8f5f6c025d18_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125469163/Put/seqid=0 2024-12-13T21:31:09,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to 
blk_1073742029_1205 (size=12697) 2024-12-13T21:31:09,220 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/5a7bac31882f42889cca441ed7263730 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/5a7bac31882f42889cca441ed7263730 2024-12-13T21:31:09,229 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/C of e2eceadaa1cf76613a4f5d367b5ca446 into 5a7bac31882f42889cca441ed7263730(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:09,229 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:09,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:09,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36230 deadline: 1734125529229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,231 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/C, priority=13, startTime=1734125469070; duration=0sec 2024-12-13T21:31:09,231 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:09,231 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:C 2024-12-13T21:31:09,231 DEBUG [Thread-679 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8210 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at 
org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:31:09,244 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742030_1206 (size=14794) 2024-12-13T21:31:09,246 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:09,251 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213b9891c312dd14b149bbe8f5f6c025d18_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b9891c312dd14b149bbe8f5f6c025d18_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:09,253 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/463c0969185d461badf31a2c2bf9c243, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:09,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/463c0969185d461badf31a2c2bf9c243 is 175, key is test_row_0/A:col10/1734125469163/Put/seqid=0 2024-12-13T21:31:09,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742031_1207 (size=39749) 2024-12-13T21:31:09,263 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=251, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/463c0969185d461badf31a2c2bf9c243 2024-12-13T21:31:09,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/bcc1d271c6b44438ade7d84608170cb0 is 50, key is test_row_0/B:col10/1734125469163/Put/seqid=0 2024-12-13T21:31:09,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742032_1208 (size=12151) 2024-12-13T21:31:09,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/bcc1d271c6b44438ade7d84608170cb0 2024-12-13T21:31:09,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:09,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125529293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/46d51db89c7d442b9a492b974dddf3a2 is 50, key is test_row_0/C:col10/1734125469163/Put/seqid=0 2024-12-13T21:31:09,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-13T21:31:09,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742033_1209 (size=12151) 2024-12-13T21:31:09,326 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,327 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-13T21:31:09,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:09,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:09,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:09,327 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:09,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:09,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:09,479 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-13T21:31:09,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:09,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:09,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:09,480 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:09,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:09,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:09,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:09,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125529498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,597 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/8af207e3e838439387851f78a553298c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/8af207e3e838439387851f78a553298c 2024-12-13T21:31:09,603 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/A of e2eceadaa1cf76613a4f5d367b5ca446 into 8af207e3e838439387851f78a553298c(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:09,603 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:09,603 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/A, priority=13, startTime=1734125469070; duration=0sec 2024-12-13T21:31:09,604 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:09,604 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:A 2024-12-13T21:31:09,631 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-13T21:31:09,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:09,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:09,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:09,632 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:09,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:09,713 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/46d51db89c7d442b9a492b974dddf3a2 2024-12-13T21:31:09,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/463c0969185d461badf31a2c2bf9c243 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/463c0969185d461badf31a2c2bf9c243 2024-12-13T21:31:09,722 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/463c0969185d461badf31a2c2bf9c243, entries=200, sequenceid=251, filesize=38.8 K 2024-12-13T21:31:09,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/bcc1d271c6b44438ade7d84608170cb0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/bcc1d271c6b44438ade7d84608170cb0 2024-12-13T21:31:09,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/bcc1d271c6b44438ade7d84608170cb0, entries=150, sequenceid=251, filesize=11.9 K 2024-12-13T21:31:09,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/46d51db89c7d442b9a492b974dddf3a2 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/46d51db89c7d442b9a492b974dddf3a2 2024-12-13T21:31:09,733 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/46d51db89c7d442b9a492b974dddf3a2, entries=150, sequenceid=251, filesize=11.9 K 2024-12-13T21:31:09,757 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for e2eceadaa1cf76613a4f5d367b5ca446 in 592ms, sequenceid=251, compaction requested=false 2024-12-13T21:31:09,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:09,783 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=56 2024-12-13T21:31:09,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:09,784 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-13T21:31:09,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:09,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:09,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:09,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:09,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:09,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:09,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213ed74de8ba2e9469e843d589220ac293a_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125469172/Put/seqid=0 2024-12-13T21:31:09,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:09,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
as already flushing 2024-12-13T21:31:09,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-13T21:31:09,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742034_1210 (size=12404) 2024-12-13T21:31:09,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:09,820 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213ed74de8ba2e9469e843d589220ac293a_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213ed74de8ba2e9469e843d589220ac293a_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:09,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/9170df6d6ec049aeb80b5463bb48d0e1, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:09,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/9170df6d6ec049aeb80b5463bb48d0e1 is 175, key is test_row_0/A:col10/1734125469172/Put/seqid=0 2024-12-13T21:31:09,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742035_1211 (size=31205) 2024-12-13T21:31:09,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:09,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125529852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:09,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:09,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125529956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:10,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:10,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125530160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:10,227 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=263, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/9170df6d6ec049aeb80b5463bb48d0e1 2024-12-13T21:31:10,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/5fa73bce40a846cdb94b05a45efd6836 is 50, key is test_row_0/B:col10/1734125469172/Put/seqid=0 2024-12-13T21:31:10,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742036_1212 (size=12251) 2024-12-13T21:31:10,238 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/5fa73bce40a846cdb94b05a45efd6836 2024-12-13T21:31:10,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/1f4746df1b114d81af3312644d6c4cdf is 50, key is test_row_0/C:col10/1734125469172/Put/seqid=0 2024-12-13T21:31:10,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742037_1213 (size=12251) 2024-12-13T21:31:10,252 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/1f4746df1b114d81af3312644d6c4cdf 2024-12-13T21:31:10,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/9170df6d6ec049aeb80b5463bb48d0e1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/9170df6d6ec049aeb80b5463bb48d0e1 2024-12-13T21:31:10,260 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/9170df6d6ec049aeb80b5463bb48d0e1, entries=150, sequenceid=263, filesize=30.5 K 2024-12-13T21:31:10,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/5fa73bce40a846cdb94b05a45efd6836 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/5fa73bce40a846cdb94b05a45efd6836 2024-12-13T21:31:10,266 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/5fa73bce40a846cdb94b05a45efd6836, entries=150, sequenceid=263, filesize=12.0 K 2024-12-13T21:31:10,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/1f4746df1b114d81af3312644d6c4cdf as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/1f4746df1b114d81af3312644d6c4cdf 2024-12-13T21:31:10,271 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/1f4746df1b114d81af3312644d6c4cdf, entries=150, sequenceid=263, filesize=12.0 K 2024-12-13T21:31:10,272 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for e2eceadaa1cf76613a4f5d367b5ca446 in 488ms, sequenceid=263, compaction requested=true 2024-12-13T21:31:10,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 
{event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:10,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:10,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-13T21:31:10,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-13T21:31:10,275 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-13T21:31:10,275 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5630 sec 2024-12-13T21:31:10,276 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.5670 sec 2024-12-13T21:31:10,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:10,466 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-13T21:31:10,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:10,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:10,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:10,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:10,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:10,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:10,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:10,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125530478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:10,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213276e871e23b14a5a9cd571b8af635e55_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125469850/Put/seqid=0 2024-12-13T21:31:10,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742038_1214 (size=14994) 2024-12-13T21:31:10,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:10,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125530580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:10,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:10,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125530784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:10,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-13T21:31:10,814 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-13T21:31:10,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:10,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-13T21:31:10,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-13T21:31:10,817 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:10,818 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:10,818 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:10,902 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:10,905 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213276e871e23b14a5a9cd571b8af635e55_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213276e871e23b14a5a9cd571b8af635e55_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:10,906 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/037320ef21e54560a6e8bd60b4f58293, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:10,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/037320ef21e54560a6e8bd60b4f58293 is 175, key is test_row_0/A:col10/1734125469850/Put/seqid=0 2024-12-13T21:31:10,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742039_1215 (size=39949) 2024-12-13T21:31:10,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-13T21:31:10,969 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:10,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-13T21:31:10,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:10,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:10,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:10,970 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
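The pid=57/58 flush procedures above were kicked off by a client request ("Client=jenkins//172.17.0.3 flush TestAcidGuarantees"); the region server declines with "already flushing" because MemStoreFlusher.0 is still writing out the previous snapshot, so the master keeps re-dispatching pid=58 in the records that follow. A minimal, hypothetical sketch of how such a flush is requested with the standard HBase Admin API (the class name and configuration source are illustrative, not taken from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Asks the master to run a table flush procedure, which fans out one
          // region flush procedure per region to the owning region server.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The call waits on the master-side procedure, which is the client view of records like "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed" logged by Thread-681 above.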
2024-12-13T21:31:10,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:10,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:11,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:11,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125531090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:11,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-13T21:31:11,125 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:11,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-13T21:31:11,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:11,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:11,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
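Each rejected Mutate above (callId 204, 213, 215, 217, 219) hands the RegionTooBusyException back to the caller while the memstore sits over its 512.0 K blocking limit; the HBase client normally retries these internally. A minimal application-level sketch of the same back-off idea, assuming the exception is allowed to surface to the caller; the class name, row, column and value are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              // Rejected with RegionTooBusyException while the region memstore is over its blocking limit.
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) throw e;   // give up after a few attempts
              Thread.sleep(backoffMs);     // give the in-flight flush time to drain the memstore
              backoffMs *= 2;              // simple exponential back-off
            }
          }
        }
      }
    }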
2024-12-13T21:31:11,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:11,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:11,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:11,282 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:11,282 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-13T21:31:11,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:11,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:11,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:11,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:11,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:11,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:11,313 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/037320ef21e54560a6e8bd60b4f58293 2024-12-13T21:31:11,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/03c39b041afe4009aaf3586f3cccc2bb is 50, key is test_row_0/B:col10/1734125469850/Put/seqid=0 2024-12-13T21:31:11,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742040_1216 (size=12301) 2024-12-13T21:31:11,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/03c39b041afe4009aaf3586f3cccc2bb 2024-12-13T21:31:11,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/69df6b3f42ea495b976921f22977bf37 is 50, key is test_row_0/C:col10/1734125469850/Put/seqid=0 2024-12-13T21:31:11,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742041_1217 (size=12301) 2024-12-13T21:31:11,342 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=290 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/69df6b3f42ea495b976921f22977bf37 2024-12-13T21:31:11,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/037320ef21e54560a6e8bd60b4f58293 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/037320ef21e54560a6e8bd60b4f58293 2024-12-13T21:31:11,350 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/037320ef21e54560a6e8bd60b4f58293, entries=200, sequenceid=290, filesize=39.0 K 2024-12-13T21:31:11,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/03c39b041afe4009aaf3586f3cccc2bb as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/03c39b041afe4009aaf3586f3cccc2bb 2024-12-13T21:31:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,356 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/03c39b041afe4009aaf3586f3cccc2bb, entries=150, sequenceid=290, filesize=12.0 K 2024-12-13T21:31:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/69df6b3f42ea495b976921f22977bf37 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/69df6b3f42ea495b976921f22977bf37 2024-12-13T21:31:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,363 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/69df6b3f42ea495b976921f22977bf37, entries=150, sequenceid=290, filesize=12.0 K 2024-12-13T21:31:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,364 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for e2eceadaa1cf76613a4f5d367b5ca446 in 899ms, sequenceid=290, compaction requested=true 2024-12-13T21:31:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:11,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:31:11,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:11,365 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:11,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:11,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:11,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:11,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-13T21:31:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,365 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,365 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,367 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49400 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:11,367 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/B is initiating minor compaction (all files) 2024-12-13T21:31:11,367 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/B in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,367 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/700efd06071f44b7bf5a6abe47e7de56, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/bcc1d271c6b44438ade7d84608170cb0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/5fa73bce40a846cdb94b05a45efd6836, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/03c39b041afe4009aaf3586f3cccc2bb] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=48.2 K 2024-12-13T21:31:11,368 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142554 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:11,368 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/A is initiating minor compaction (all files) 2024-12-13T21:31:11,368 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/A in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
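The 512.0 K figure in the RegionTooBusyException records is the region's blocking memstore size, i.e. the configured memstore flush size multiplied by the block multiplier (default 4), and the minor compactions being selected here fire once enough flushed files have accumulated in a store. A hedged Java sketch of the knobs involved, with illustrative values rather than the ones this test actually used:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndCompactionTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Per-region memstore flush size; writes are rejected with RegionTooBusyException
        // once the memstore grows past flush.size * block.multiplier (values are illustrative).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // 128 K
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // blocking limit = 512 K

        // Minimum number of eligible store files before a minor compaction is selected
        // (3 is the default); the selections above ran once four flushed files had accumulated.
        conf.setInt("hbase.hstore.compaction.min", 3);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit = " + blockingLimit / 1024 + " K");
      }
    }

With these example values the blocking limit works out to 128 K * 4 = 512 K, matching the limit logged in the warnings above.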
2024-12-13T21:31:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,368 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/8af207e3e838439387851f78a553298c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/463c0969185d461badf31a2c2bf9c243, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/9170df6d6ec049aeb80b5463bb48d0e1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/037320ef21e54560a6e8bd60b4f58293] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=139.2 K 2024-12-13T21:31:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,368 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,368 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/8af207e3e838439387851f78a553298c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/463c0969185d461badf31a2c2bf9c243, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/9170df6d6ec049aeb80b5463bb48d0e1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/037320ef21e54560a6e8bd60b4f58293] 2024-12-13T21:31:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,368 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 700efd06071f44b7bf5a6abe47e7de56, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1734125468431 2024-12-13T21:31:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,369 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8af207e3e838439387851f78a553298c, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1734125468431 2024-12-13T21:31:11,369 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting bcc1d271c6b44438ade7d84608170cb0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734125468532 2024-12-13T21:31:11,369 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 463c0969185d461badf31a2c2bf9c243, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734125468530 2024-12-13T21:31:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,369 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fa73bce40a846cdb94b05a45efd6836, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1734125469172 2024-12-13T21:31:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,370 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 03c39b041afe4009aaf3586f3cccc2bb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1734125469846 2024-12-13T21:31:11,370 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9170df6d6ec049aeb80b5463bb48d0e1, keycount=150, bloomtype=ROW, size=30.5 K, 
encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1734125469172 2024-12-13T21:31:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,371 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 037320ef21e54560a6e8bd60b4f58293, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1734125469846 2024-12-13T21:31:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,381 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,386 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121346660d01e91b4345a5658866e78c6fcc_e2eceadaa1cf76613a4f5d367b5ca446 store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,388 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#B#compaction#181 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:11,388 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/0e03ad45bfd44fabb57aced6f672d242 is 50, key is test_row_0/B:col10/1734125469850/Put/seqid=0 2024-12-13T21:31:11,389 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121346660d01e91b4345a5658866e78c6fcc_e2eceadaa1cf76613a4f5d367b5ca446, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:11,389 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121346660d01e91b4345a5658866e78c6fcc_e2eceadaa1cf76613a4f5d367b5ca446 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:11,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,401 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,405 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,409 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,412 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742042_1218 (size=4469) 2024-12-13T21:31:11,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,418 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#A#compaction#180 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:11,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,418 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/33b7d6ca21d3467a8e5846f7cde12607 is 175, key is test_row_0/A:col10/1734125469850/Put/seqid=0 2024-12-13T21:31:11,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,421 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-13T21:31:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742043_1219 (size=12983) 2024-12-13T21:31:11,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,435 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:11,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-13T21:31:11,435 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:11,436 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-13T21:31:11,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:11,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:11,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:11,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:11,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:11,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:11,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,439 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,442 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/0e03ad45bfd44fabb57aced6f672d242 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/0e03ad45bfd44fabb57aced6f672d242 2024-12-13T21:31:11,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,442 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,445 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,448 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/B of e2eceadaa1cf76613a4f5d367b5ca446 into 0e03ad45bfd44fabb57aced6f672d242(size=12.7 K), total size for store is 
12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:11,448 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:11,448 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/B, priority=12, startTime=1734125471365; duration=0sec 2024-12-13T21:31:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,448 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:11,448 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:B 2024-12-13T21:31:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,448 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,450 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49400 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:11,450 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 
e2eceadaa1cf76613a4f5d367b5ca446/C is initiating minor compaction (all files) 2024-12-13T21:31:11,450 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/C in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,450 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/5a7bac31882f42889cca441ed7263730, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/46d51db89c7d442b9a492b974dddf3a2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/1f4746df1b114d81af3312644d6c4cdf, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/69df6b3f42ea495b976921f22977bf37] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=48.2 K 2024-12-13T21:31:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,451 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a7bac31882f42889cca441ed7263730, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1734125468431 2024-12-13T21:31:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,451 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 46d51db89c7d442b9a492b974dddf3a2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734125468532 2024-12-13T21:31:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,451 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f4746df1b114d81af3312644d6c4cdf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1734125469172 2024-12-13T21:31:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,452 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 69df6b3f42ea495b976921f22977bf37, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1734125469846 2024-12-13T21:31:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46537 is added to blk_1073742044_1220 (size=31937) 2024-12-13T21:31:11,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,473 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#C#compaction#182 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:11,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,474 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/02c0c793877c4c7091a09bdf9e008cf9 is 50, key is test_row_0/C:col10/1734125469850/Put/seqid=0 2024-12-13T21:31:11,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121353818eb46d6d4897851670fbebe1f17c_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125470477/Put/seqid=0 2024-12-13T21:31:11,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
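The repeated storefiletracker.StoreFileTrackerFactory(122) entries above show each RPC handler resolving the store file tracker for the store it touches and ending up with DefaultStoreFileTracker. As a minimal sketch only (not taken from this test run), assuming an HBase 2.5+ client where StoreFileTrackerFactory reads the "hbase.store.file-tracker.impl" property, a table like TestAcidGuarantees with families A, B and C could pin that choice explicitly at creation time:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Table-level override of the tracker implementation; "DEFAULT" matches the
      // DefaultStoreFileTracker instantiated throughout the log entries above.
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setValue("hbase.store.file-tracker.impl", "DEFAULT")
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("A")))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("B")))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("C")));
      admin.createTable(table.build());
    }
  }
}
```

The same property can also be set cluster-wide in hbase-site.xml; either way it is this setting that determines which implementation the factory instantiates in the DEBUG lines above.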
2024-12-13T21:31:11,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,483 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/33b7d6ca21d3467a8e5846f7cde12607 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/33b7d6ca21d3467a8e5846f7cde12607 2024-12-13T21:31:11,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
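The HRegionFileSystem(442) "Committing ... .tmp/A/33b7d6ca... as ... /A/33b7d6ca..." entry above is the point where a finished compaction output is moved out of the region's .tmp directory into the column family directory. A hedged sketch of that rename-into-place step, using only the public Hadoop FileSystem API (the real code path goes through HRegionFileSystem and the store file tracker, so treat this purely as an illustration; the paths are hypothetical):

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitTmpFileSketch {
  /**
   * Illustration only: move a flushed or compacted HFile from the region's .tmp
   * area into its column family directory, mirroring the "Committing ... as ..."
   * log entry above.
   */
  public static Path commit(Configuration conf, Path tmpFile, Path familyDir) throws IOException {
    FileSystem fs = tmpFile.getFileSystem(conf);
    Path dest = new Path(familyDir, tmpFile.getName());
    // On HDFS, rename is the cheap "commit": the file becomes visible under the
    // family directory without copying any data blocks.
    if (!fs.rename(tmpFile, dest)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dest);
    }
    return dest;
  }
}
```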
2024-12-13T21:31:11,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,491 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/A of e2eceadaa1cf76613a4f5d367b5ca446 into 33b7d6ca21d3467a8e5846f7cde12607(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
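The compaction entries above ("Exploring compaction algorithm has selected 4 files of size 49400 ... with 3 in ratio", followed by "Completed compaction of 4 (all) file(s) ... into 33b7d6ca21d3467a8e5846f7cde12607(size=31.2 K)") report the exploring policy checking contiguous permutations of the eligible store files and counting how many candidates pass the size-ratio test. A simplified, hedged sketch of that ratio test follows; it is not HBase's actual ExploringCompactionPolicy code, and the 1.2 value mirrors the usual hbase.hstore.compaction.ratio default only as an assumption:

```java
import java.util.List;

public class RatioCheckSketch {
  /**
   * Sketch of the "in ratio" test: a candidate file qualifies if it is no larger
   * than ratio * (sum of the other files in the selection).
   */
  static boolean selectionInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes roughly matching the four C-family files listed above (~12 KB each).
    System.out.println(selectionInRatio(List.of(12_700L, 12_200L, 12_300L, 12_300L), 1.2));
  }
}
```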
2024-12-13T21:31:11,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,491 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:11,491 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/A, priority=12, startTime=1734125471364; duration=0sec 2024-12-13T21:31:11,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,491 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:11,491 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:A 2024-12-13T21:31:11,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... 2024-12-13T21:31:11,495 through 2024-12-13T21:31:11,518: the preceding DEBUG entry from storefiletracker.StoreFileTrackerFactory(122) repeats continuously on RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=38989); duplicate entries elided ...]
[... 2024-12-13T21:31:11,518 through 2024-12-13T21:31:11,536: the DEBUG entry "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" continues to repeat on RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=38989); duplicates elided, distinct entries kept below ...]
2024-12-13T21:31:11,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742046_1222 (size=9914)
2024-12-13T21:31:11,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742045_1221 (size=12983)
[... 2024-12-13T21:31:11,536 through 2024-12-13T21:31:11,548: the DEBUG entry "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" continues to repeat on RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=38989); duplicates elided, distinct entries kept below ...]
2024-12-13T21:31:11,538 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/02c0c793877c4c7091a09bdf9e008cf9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/02c0c793877c4c7091a09bdf9e008cf9
2024-12-13T21:31:11,544 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/C of e2eceadaa1cf76613a4f5d367b5ca446 into 02c0c793877c4c7091a09bdf9e008cf9(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-13T21:31:11,544 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446:
2024-12-13T21:31:11,544 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/C, priority=12, startTime=1734125471365; duration=0sec
2024-12-13T21:31:11,544 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T21:31:11,545 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:C
[... 2024-12-13T21:31:11,548 through 2024-12-13T21:31:11,598: the DEBUG entry "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" continues to repeat on RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=38989); duplicate entries elided ...]
2024-12-13T21:31:11,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-13T21:31:11,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-13T21:31:11,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-13T21:31:11,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-13T21:31:11,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-13T21:31:11,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-13T21:31:11,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-13T21:31:11,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-13T21:31:11,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:11,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:11,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:11,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125531824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:11,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-13T21:31:11,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:11,930 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121353818eb46d6d4897851670fbebe1f17c_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121353818eb46d6d4897851670fbebe1f17c_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:11,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/d32a4ddb994c4e8a93456a9c2f5656e1, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:11,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/d32a4ddb994c4e8a93456a9c2f5656e1 is 175, key is test_row_0/A:col10/1734125470477/Put/seqid=0 2024-12-13T21:31:11,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742047_1223 (size=22561) 2024-12-13T21:31:11,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:11,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125531944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:12,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:12,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125532150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:12,345 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=299, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/d32a4ddb994c4e8a93456a9c2f5656e1 2024-12-13T21:31:12,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/f3d1b79753894a51a886c14b45e24006 is 50, key is test_row_0/B:col10/1734125470477/Put/seqid=0 2024-12-13T21:31:12,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742048_1224 (size=9857) 2024-12-13T21:31:12,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:12,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125532451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:12,757 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/f3d1b79753894a51a886c14b45e24006 2024-12-13T21:31:12,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/8aba03f8e18947a4a71fb959183e1987 is 50, key is test_row_0/C:col10/1734125470477/Put/seqid=0 2024-12-13T21:31:12,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742049_1225 (size=9857) 2024-12-13T21:31:12,776 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/8aba03f8e18947a4a71fb959183e1987 2024-12-13T21:31:12,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/d32a4ddb994c4e8a93456a9c2f5656e1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/d32a4ddb994c4e8a93456a9c2f5656e1 2024-12-13T21:31:12,785 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/d32a4ddb994c4e8a93456a9c2f5656e1, entries=100, sequenceid=299, filesize=22.0 K 2024-12-13T21:31:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/f3d1b79753894a51a886c14b45e24006 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/f3d1b79753894a51a886c14b45e24006 2024-12-13T21:31:12,791 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/f3d1b79753894a51a886c14b45e24006, entries=100, sequenceid=299, filesize=9.6 K 2024-12-13T21:31:12,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/8aba03f8e18947a4a71fb959183e1987 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/8aba03f8e18947a4a71fb959183e1987 2024-12-13T21:31:12,799 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/8aba03f8e18947a4a71fb959183e1987, entries=100, sequenceid=299, filesize=9.6 K 2024-12-13T21:31:12,800 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for e2eceadaa1cf76613a4f5d367b5ca446 in 1364ms, sequenceid=299, compaction requested=false 2024-12-13T21:31:12,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:12,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
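The repeated RegionTooBusyException rejections above come from the region blocking mutations while its memstore is over the limit and a flush is still in progress. Below is a minimal client-side sketch of coping with that back-pressure; the HBase retrying client already retries such errors internally, and the table name, column, and backoff values here are illustrative only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                                  // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                                    // the client retries retriable errors first
          break;
        } catch (RegionTooBusyException | RetriesExhaustedException e) {
          // Region blocked updates (memstore over limit); wait for the flush to catch up.
          Thread.sleep(backoffMs);
          backoffMs *= 2;                                    // simple exponential backoff
        }
      }
    }
  }
}
```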
2024-12-13T21:31:12,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-13T21:31:12,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-13T21:31:12,803 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-13T21:31:12,803 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9830 sec 2024-12-13T21:31:12,804 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.9870 sec 2024-12-13T21:31:12,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-13T21:31:12,923 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-13T21:31:12,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:12,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-12-13T21:31:12,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-13T21:31:12,926 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:12,926 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:12,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:12,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:12,958 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-13T21:31:12,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:12,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:12,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:12,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
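The "Operation: FLUSH ... procId: 57 completed" and "Client=jenkins//172.17.0.3 flush TestAcidGuarantees" entries correspond to an admin-driven table flush. A minimal sketch of issuing one is below; connection setup is illustrative. As the pid=59/pid=60 entries show, the master turns the request into a FlushTableProcedure with per-region FlushRegionProcedure children.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master reports the flush procedure for the table as done,
      // which is the polling visible as "Checking to see if procedure is done" above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```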
2024-12-13T21:31:12,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:12,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:12,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213be715aae1aea439c99d498989e3ac18f_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125471813/Put/seqid=0 2024-12-13T21:31:12,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:12,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125532970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:12,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742050_1226 (size=12454) 2024-12-13T21:31:12,972 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:12,975 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213be715aae1aea439c99d498989e3ac18f_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213be715aae1aea439c99d498989e3ac18f_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:12,976 
DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/dd81fcb1b38e4d79abb624583d470111, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:12,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/dd81fcb1b38e4d79abb624583d470111 is 175, key is test_row_0/A:col10/1734125471813/Put/seqid=0 2024-12-13T21:31:12,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742051_1227 (size=31255) 2024-12-13T21:31:13,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-13T21:31:13,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125533072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:13,077 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:13,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-13T21:31:13,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:13,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:13,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:13,078 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
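The 512.0 K figure in the rejections above is the per-region blocking threshold: the configured memstore flush size multiplied by the block multiplier. The exact settings this test uses are not shown in the excerpt; the sketch below uses example values (128 K x 4) that happen to produce the same 512 K limit, with the real HBase property names.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Memstore size at which a flush is triggered (default 128 MB; tests often shrink it).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Updates are blocked once the memstore reaches flush size * this multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block at ~" + (blockingLimit / 1024) + " K per region");
  }
}
```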
2024-12-13T21:31:13,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-13T21:31:13,230 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:13,230 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-13T21:31:13,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:13,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:13,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:13,230 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:13,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125533275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:13,380 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/dd81fcb1b38e4d79abb624583d470111 2024-12-13T21:31:13,382 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:13,383 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-13T21:31:13,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:13,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:13,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
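The mobdir/.tmp renames and the DefaultMobStoreFlusher entries above are the MOB (medium object) flush path for family A. A hedged sketch of declaring such a family is below; the threshold value is an example, not the one this test configures.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)   // flushes for A go through HMobStore, as logged above
                  .setMobThreshold(100)  // cells at or above 100 bytes stored as MOB (example value)
                  .build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
              .build());
    }
  }
}
```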
2024-12-13T21:31:13,383 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/6f4b04e7f38e4be1b8766a6f4ab498d4 is 50, key is test_row_0/B:col10/1734125471813/Put/seqid=0 2024-12-13T21:31:13,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742052_1228 (size=12301) 2024-12-13T21:31:13,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/6f4b04e7f38e4be1b8766a6f4ab498d4 2024-12-13T21:31:13,418 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/c5be0f72926e45a69bc3a18a7a4d42ba is 50, key is test_row_0/C:col10/1734125471813/Put/seqid=0 2024-12-13T21:31:13,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742053_1229 (size=12301) 2024-12-13T21:31:13,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-13T21:31:13,541 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:13,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-13T21:31:13,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:13,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:13,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:13,542 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
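While the flush and the retried pid=60 procedure play out above, the acid-guarantees style of check is to read a whole row back and verify the families agree. Below is a minimal reader sketch, assuming the writers put the same value into every family of a row (the usual pattern for this kind of test); connection setup and the qualifier are illustrative.

```java
import java.util.Arrays;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowConsistencyCheck {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Get get = new Get(Bytes.toBytes("test_row_0"));
      get.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"));
      get.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"));
      get.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"));
      Result r = table.get(get);  // merges memstore segments with the flushed HFiles
      byte[] a = r.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
      byte[] b = r.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
      byte[] c = r.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
      // If one value was written to all three families, a consistent read returns matching bytes.
      if (!Arrays.equals(a, b) || !Arrays.equals(b, c)) {
        System.err.println("Inconsistent row: families disagree");
      }
    }
  }
}
```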
2024-12-13T21:31:13,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:13,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125533578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:13,694 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:13,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-13T21:31:13,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:13,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:13,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:13,694 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
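Each rejected Mutate above is logged with a deadline, which reflects the client-side RPC and operation budgets that bound how long these retries can go on. The property names below are the standard HBase client settings; the values are examples, not the ones this test runs with.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientTimeouts {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 15);          // attempts before giving up
    conf.setLong("hbase.client.pause", 100);                 // base pause between retries, ms
    conf.setInt("hbase.rpc.timeout", 60_000);                // per-RPC deadline, ms
    conf.setInt("hbase.client.operation.timeout", 120_000);  // whole-operation budget, ms
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Tables obtained from this connection inherit the timeouts configured above.
    }
  }
}
```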
2024-12-13T21:31:13,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,846 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:13,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-13T21:31:13,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:13,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:13,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:13,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:13,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
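[Annotation] The pid=60 failures above are the master re-dispatching a FlushRegionProcedure while the region is still mid-flush ("NOT flushing ... as already flushing"); further down the log the same pid=60 is reported as "Remote procedure done" once the in-progress flush finishes. For orientation only, the table flush that drives this procedure chain is the kind of request a client issues through the Admin API. A minimal sketch, assuming an hbase-site.xml on the classpath; the class name is made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table. In this log the master
                // answers such a request by scheduling a FlushTableProcedure (pid=59) with a
                // per-region FlushRegionProcedure subprocedure (pid=60).
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }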
2024-12-13T21:31:13,855 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/c5be0f72926e45a69bc3a18a7a4d42ba 2024-12-13T21:31:13,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/dd81fcb1b38e4d79abb624583d470111 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd81fcb1b38e4d79abb624583d470111 2024-12-13T21:31:13,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd81fcb1b38e4d79abb624583d470111, entries=150, sequenceid=330, filesize=30.5 K 2024-12-13T21:31:13,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/6f4b04e7f38e4be1b8766a6f4ab498d4 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/6f4b04e7f38e4be1b8766a6f4ab498d4 2024-12-13T21:31:13,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/6f4b04e7f38e4be1b8766a6f4ab498d4, entries=150, sequenceid=330, filesize=12.0 K 2024-12-13T21:31:13,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/c5be0f72926e45a69bc3a18a7a4d42ba as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/c5be0f72926e45a69bc3a18a7a4d42ba 2024-12-13T21:31:13,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/c5be0f72926e45a69bc3a18a7a4d42ba, entries=150, sequenceid=330, filesize=12.0 K 2024-12-13T21:31:13,874 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for e2eceadaa1cf76613a4f5d367b5ca446 in 915ms, sequenceid=330, compaction requested=true 2024-12-13T21:31:13,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:13,874 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:13,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:A, priority=-2147483648, 
current under compaction store size is 1 2024-12-13T21:31:13,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:13,874 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:13,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:13,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:13,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:13,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:13,875 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85753 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:13,875 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/A is initiating minor compaction (all files) 2024-12-13T21:31:13,875 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:13,875 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/A in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:13,875 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/B is initiating minor compaction (all files) 2024-12-13T21:31:13,875 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/B in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
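[Annotation] The ExploringCompactionPolicy entries above report that 3 eligible store files were selected "after considering 1 permutations with 1 in ratio". The sketch below only illustrates the general size-ratio idea behind that kind of selection (choose a contiguous run of files in which no file dwarfs the rest); it is a simplified illustration, not the actual ExploringCompactionPolicy code, and the 1.2 ratio and the 3/10 file bounds in the example call are assumed values:

    import java.util.ArrayList;
    import java.util.List;

    public class SizeRatioSelectionSketch {
        // Return the largest contiguous run of files in which every file is at most
        // 'ratio' times the combined size of the other files in the run.
        static List<Long> select(List<Long> fileSizes, double ratio, int minFiles, int maxFiles) {
            List<Long> best = new ArrayList<>();
            for (int start = 0; start < fileSizes.size(); start++) {
                for (int end = start + minFiles - 1;
                     end < fileSizes.size() && end - start + 1 <= maxFiles; end++) {
                    List<Long> candidate = fileSizes.subList(start, end + 1);
                    final long total = candidate.stream().mapToLong(Long::longValue).sum();
                    boolean sizesOk = candidate.stream().allMatch(s -> s <= ratio * (total - s));
                    if (sizesOk && candidate.size() > best.size()) {
                        best = new ArrayList<>(candidate);   // keep the run with the most files
                    }
                }
            }
            return best;
        }

        public static void main(String[] args) {
            // Approximate B-store file sizes from this log: 12.7 K, 9.6 K and 12.0 K.
            System.out.println(select(List.of(12700L, 9600L, 12000L), 1.2, 3, 10));
        }
    }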
2024-12-13T21:31:13,875 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/33b7d6ca21d3467a8e5846f7cde12607, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/d32a4ddb994c4e8a93456a9c2f5656e1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd81fcb1b38e4d79abb624583d470111] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=83.7 K 2024-12-13T21:31:13,876 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/0e03ad45bfd44fabb57aced6f672d242, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/f3d1b79753894a51a886c14b45e24006, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/6f4b04e7f38e4be1b8766a6f4ab498d4] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=34.3 K 2024-12-13T21:31:13,876 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:13,876 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/33b7d6ca21d3467a8e5846f7cde12607, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/d32a4ddb994c4e8a93456a9c2f5656e1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd81fcb1b38e4d79abb624583d470111] 2024-12-13T21:31:13,876 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e03ad45bfd44fabb57aced6f672d242, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1734125469846 2024-12-13T21:31:13,876 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33b7d6ca21d3467a8e5846f7cde12607, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1734125469846 2024-12-13T21:31:13,876 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting f3d1b79753894a51a886c14b45e24006, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734125470474 2024-12-13T21:31:13,876 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting d32a4ddb994c4e8a93456a9c2f5656e1, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734125470474 2024-12-13T21:31:13,876 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f4b04e7f38e4be1b8766a6f4ab498d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1734125471810 2024-12-13T21:31:13,877 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd81fcb1b38e4d79abb624583d470111, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1734125471810 2024-12-13T21:31:13,884 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#B#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:13,885 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/694e0de7c60243bebdbe345112c4dcf3 is 50, key is test_row_0/B:col10/1734125471813/Put/seqid=0 2024-12-13T21:31:13,888 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:13,901 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412132486bb33082e41dd9d7b4222a08cc44c_e2eceadaa1cf76613a4f5d367b5ca446 store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:13,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742054_1230 (size=13085) 2024-12-13T21:31:13,902 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412132486bb33082e41dd9d7b4222a08cc44c_e2eceadaa1cf76613a4f5d367b5ca446, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:13,903 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412132486bb33082e41dd9d7b4222a08cc44c_e2eceadaa1cf76613a4f5d367b5ca446 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:13,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742055_1231 (size=4469) 2024-12-13T21:31:13,908 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#A#compaction#190 average throughput is 1.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:13,908 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/77796e1510b8406f82d9bbff526af4c8 is 175, key is test_row_0/A:col10/1734125471813/Put/seqid=0 2024-12-13T21:31:13,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742056_1232 (size=32039) 2024-12-13T21:31:13,937 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/77796e1510b8406f82d9bbff526af4c8 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/77796e1510b8406f82d9bbff526af4c8 2024-12-13T21:31:13,953 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/A of e2eceadaa1cf76613a4f5d367b5ca446 into 77796e1510b8406f82d9bbff526af4c8(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:13,953 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:13,953 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/A, priority=13, startTime=1734125473874; duration=0sec 2024-12-13T21:31:13,953 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:13,953 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:A 2024-12-13T21:31:13,953 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:13,956 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:13,956 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/C is initiating minor compaction (all files) 2024-12-13T21:31:13,956 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/C in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
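[Annotation] Both the flush and the compaction above write their output under the region's .tmp directory and then commit it by renaming it into the store directory (the HRegionFileSystem(442) "Committing ... as ..." entries). Below is a minimal sketch of that write-to-temp-then-rename pattern using the Hadoop FileSystem API; it is an illustration of the pattern only, not HBase's HRegionFileSystem code, and the paths and class name are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();   // resolves fs.defaultFS from the classpath
            FileSystem fs = FileSystem.get(conf);

            // Hypothetical paths, loosely shaped like the region layout seen in this log.
            Path tmp = new Path("/data/default/SomeTable/someregion/.tmp/A/newfile");
            Path dst = new Path("/data/default/SomeTable/someregion/A/newfile");

            // 1. Write the complete file somewhere readers never look.
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.writeBytes("store file bytes would go here");
            }

            // 2. Publish it with a single rename: readers observe either no file or a
            //    complete file, never a partially written one.
            fs.mkdirs(dst.getParent());
            if (!fs.rename(tmp, dst)) {
                throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
            }
        }
    }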
2024-12-13T21:31:13,956 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/02c0c793877c4c7091a09bdf9e008cf9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/8aba03f8e18947a4a71fb959183e1987, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/c5be0f72926e45a69bc3a18a7a4d42ba] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=34.3 K 2024-12-13T21:31:13,957 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02c0c793877c4c7091a09bdf9e008cf9, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1734125469846 2024-12-13T21:31:13,957 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8aba03f8e18947a4a71fb959183e1987, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734125470474 2024-12-13T21:31:13,957 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5be0f72926e45a69bc3a18a7a4d42ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1734125471810 2024-12-13T21:31:13,977 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#C#compaction#191 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:13,978 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/6da2ca33b4fc4d3dab5c2f386702782d is 50, key is test_row_0/C:col10/1734125471813/Put/seqid=0 2024-12-13T21:31:13,999 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:14,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-13T21:31:14,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742057_1233 (size=13085) 2024-12-13T21:31:14,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:14,002 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-13T21:31:14,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:14,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:14,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:14,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:14,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:14,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:14,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213e571814190f044c79467205159c63da5_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125472966/Put/seqid=0 2024-12-13T21:31:14,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742058_1234 (size=12454) 2024-12-13T21:31:14,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-13T21:31:14,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:14,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:14,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:14,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125534146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:14,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:14,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125534250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:14,307 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/694e0de7c60243bebdbe345112c4dcf3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/694e0de7c60243bebdbe345112c4dcf3 2024-12-13T21:31:14,311 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/B of e2eceadaa1cf76613a4f5d367b5ca446 into 694e0de7c60243bebdbe345112c4dcf3(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:14,312 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:14,312 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/B, priority=13, startTime=1734125473874; duration=0sec 2024-12-13T21:31:14,312 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:14,312 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:B 2024-12-13T21:31:14,407 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/6da2ca33b4fc4d3dab5c2f386702782d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/6da2ca33b4fc4d3dab5c2f386702782d 2024-12-13T21:31:14,415 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/C of e2eceadaa1cf76613a4f5d367b5ca446 into 6da2ca33b4fc4d3dab5c2f386702782d(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:14,415 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:14,415 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/C, priority=13, startTime=1734125473875; duration=0sec 2024-12-13T21:31:14,415 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:14,415 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:C 2024-12-13T21:31:14,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:14,434 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213e571814190f044c79467205159c63da5_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213e571814190f044c79467205159c63da5_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:14,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/691d4b5fbfb44cca8b0ca9534b63f0e2, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:14,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/691d4b5fbfb44cca8b0ca9534b63f0e2 is 175, key is test_row_0/A:col10/1734125472966/Put/seqid=0 2024-12-13T21:31:14,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742059_1235 (size=31255) 2024-12-13T21:31:14,441 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=339, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/691d4b5fbfb44cca8b0ca9534b63f0e2 2024-12-13T21:31:14,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/b53c900f1e1847c5a8eda642034b34a9 is 50, key is test_row_0/B:col10/1734125472966/Put/seqid=0 2024-12-13T21:31:14,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:14,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125534456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:14,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742060_1236 (size=12301) 2024-12-13T21:31:14,481 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/b53c900f1e1847c5a8eda642034b34a9 2024-12-13T21:31:14,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/beb8cdb647a94e86a37328b00e297707 is 50, key is test_row_0/C:col10/1734125472966/Put/seqid=0 2024-12-13T21:31:14,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742061_1237 (size=12301) 2024-12-13T21:31:14,509 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=339 
(bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/beb8cdb647a94e86a37328b00e297707 2024-12-13T21:31:14,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/691d4b5fbfb44cca8b0ca9534b63f0e2 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/691d4b5fbfb44cca8b0ca9534b63f0e2 2024-12-13T21:31:14,518 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/691d4b5fbfb44cca8b0ca9534b63f0e2, entries=150, sequenceid=339, filesize=30.5 K 2024-12-13T21:31:14,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/b53c900f1e1847c5a8eda642034b34a9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/b53c900f1e1847c5a8eda642034b34a9 2024-12-13T21:31:14,523 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/b53c900f1e1847c5a8eda642034b34a9, entries=150, sequenceid=339, filesize=12.0 K 2024-12-13T21:31:14,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/beb8cdb647a94e86a37328b00e297707 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/beb8cdb647a94e86a37328b00e297707 2024-12-13T21:31:14,528 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/beb8cdb647a94e86a37328b00e297707, entries=150, sequenceid=339, filesize=12.0 K 2024-12-13T21:31:14,528 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for e2eceadaa1cf76613a4f5d367b5ca446 in 526ms, sequenceid=339, compaction requested=false 2024-12-13T21:31:14,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 
e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:14,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:14,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-12-13T21:31:14,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-12-13T21:31:14,531 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-13T21:31:14,531 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6030 sec 2024-12-13T21:31:14,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.6080 sec 2024-12-13T21:31:14,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:14,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-13T21:31:14,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:14,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:14,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:14,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:14,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:14,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:14,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:14,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 303 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125534769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:14,787 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412131776138d84cd4cd882d629f4bf4b9aa4_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125474133/Put/seqid=0 2024-12-13T21:31:14,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742062_1238 (size=14994) 2024-12-13T21:31:14,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:14,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 305 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125534870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:15,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-13T21:31:15,030 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-13T21:31:15,031 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:15,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-12-13T21:31:15,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-13T21:31:15,033 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:15,033 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:15,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:15,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:15,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 307 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125535073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:15,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-13T21:31:15,184 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:15,185 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-13T21:31:15,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:15,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:15,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:15,185 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
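[Annotation] The repeated RegionTooBusyException / "Over memstore limit=512.0 K" warnings above mean the region is rejecting new writes until the in-flight flush frees memstore space, and callers are expected to back off and retry; the stock HBase client does this internally up to its configured retry limits, so the hand-rolled loop below is only an illustration of the pattern. A minimal sketch, assuming the TestAcidGuarantees schema with families A/B/C seen in this log; the row value and retry parameters are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;
    import java.io.IOException;

    public class BackoffPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

                long backoffMs = 100;
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);                // may surface RegionTooBusyException as an IOException
                        break;
                    } catch (IOException busy) {
                        if (attempt >= 5) throw busy;  // give up after a few attempts
                        Thread.sleep(backoffMs);       // back off before retrying
                        backoffMs *= 2;
                    }
                }
            }
        }
    }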
2024-12-13T21:31:15,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,191 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:15,194 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412131776138d84cd4cd882d629f4bf4b9aa4_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412131776138d84cd4cd882d629f4bf4b9aa4_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:15,195 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/0972bc3c5c5349d794cf00fdfbfdfe9e, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:15,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/0972bc3c5c5349d794cf00fdfbfdfe9e is 175, key is test_row_0/A:col10/1734125474133/Put/seqid=0 2024-12-13T21:31:15,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742063_1239 (size=39949) 2024-12-13T21:31:15,200 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=370, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/0972bc3c5c5349d794cf00fdfbfdfe9e 2024-12-13T21:31:15,206 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/8004043723dd47f1b1a7e51e705d819a is 50, key is test_row_0/B:col10/1734125474133/Put/seqid=0 2024-12-13T21:31:15,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742064_1240 (size=12301) 2024-12-13T21:31:15,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-13T21:31:15,337 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:15,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-13T21:31:15,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation 
on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:15,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:15,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:15,338 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:15,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:15,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 309 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125535374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:15,489 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:15,490 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-13T21:31:15,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:15,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:15,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:15,490 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:15,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,612 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/8004043723dd47f1b1a7e51e705d819a 2024-12-13T21:31:15,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/a96ac905600b4ff2bb161c1500e7fe22 is 50, key is test_row_0/C:col10/1734125474133/Put/seqid=0 2024-12-13T21:31:15,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742065_1241 (size=12301) 2024-12-13T21:31:15,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-13T21:31:15,642 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:15,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-13T21:31:15,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:15,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:15,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:15,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,794 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:15,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-13T21:31:15,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:15,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:15,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:15,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:15,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 311 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125535879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:15,947 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:15,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-13T21:31:15,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:15,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. as already flushing 2024-12-13T21:31:15,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:15,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:15,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:16,026 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/a96ac905600b4ff2bb161c1500e7fe22 2024-12-13T21:31:16,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/0972bc3c5c5349d794cf00fdfbfdfe9e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/0972bc3c5c5349d794cf00fdfbfdfe9e 2024-12-13T21:31:16,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/0972bc3c5c5349d794cf00fdfbfdfe9e, entries=200, sequenceid=370, filesize=39.0 K 2024-12-13T21:31:16,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/8004043723dd47f1b1a7e51e705d819a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/8004043723dd47f1b1a7e51e705d819a 2024-12-13T21:31:16,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/8004043723dd47f1b1a7e51e705d819a, entries=150, sequenceid=370, filesize=12.0 K 2024-12-13T21:31:16,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/a96ac905600b4ff2bb161c1500e7fe22 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/a96ac905600b4ff2bb161c1500e7fe22 2024-12-13T21:31:16,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/a96ac905600b4ff2bb161c1500e7fe22, entries=150, sequenceid=370, filesize=12.0 K 2024-12-13T21:31:16,045 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for e2eceadaa1cf76613a4f5d367b5ca446 in 1284ms, sequenceid=370, compaction requested=true 2024-12-13T21:31:16,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:16,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:31:16,045 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:16,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:16,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:16,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:16,045 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:16,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:16,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:16,046 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:16,046 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/A is initiating minor compaction (all files) 2024-12-13T21:31:16,046 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:16,047 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/A in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:16,047 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/B is initiating minor compaction (all files) 2024-12-13T21:31:16,047 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/77796e1510b8406f82d9bbff526af4c8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/691d4b5fbfb44cca8b0ca9534b63f0e2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/0972bc3c5c5349d794cf00fdfbfdfe9e] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=100.8 K 2024-12-13T21:31:16,047 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/B in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:16,047 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:16,047 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/77796e1510b8406f82d9bbff526af4c8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/691d4b5fbfb44cca8b0ca9534b63f0e2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/0972bc3c5c5349d794cf00fdfbfdfe9e] 2024-12-13T21:31:16,047 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/694e0de7c60243bebdbe345112c4dcf3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/b53c900f1e1847c5a8eda642034b34a9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/8004043723dd47f1b1a7e51e705d819a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=36.8 K 2024-12-13T21:31:16,047 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77796e1510b8406f82d9bbff526af4c8, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1734125471810 2024-12-13T21:31:16,047 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 694e0de7c60243bebdbe345112c4dcf3, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1734125471810 2024-12-13T21:31:16,047 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 691d4b5fbfb44cca8b0ca9534b63f0e2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1734125472961 2024-12-13T21:31:16,047 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting b53c900f1e1847c5a8eda642034b34a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1734125472961 2024-12-13T21:31:16,048 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 8004043723dd47f1b1a7e51e705d819a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1734125474133 2024-12-13T21:31:16,048 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0972bc3c5c5349d794cf00fdfbfdfe9e, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1734125474133 2024-12-13T21:31:16,055 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:16,056 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#B#compaction#198 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:16,057 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/d8acbf49f42a4572b7068a1c547d86fb is 50, key is test_row_0/B:col10/1734125474133/Put/seqid=0 2024-12-13T21:31:16,064 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121309a3a0dd11b04a4fac3dcf4c4128b643_e2eceadaa1cf76613a4f5d367b5ca446 store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:16,065 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121309a3a0dd11b04a4fac3dcf4c4128b643_e2eceadaa1cf76613a4f5d367b5ca446, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:16,065 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121309a3a0dd11b04a4fac3dcf4c4128b643_e2eceadaa1cf76613a4f5d367b5ca446 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:16,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742067_1243 (size=4469) 2024-12-13T21:31:16,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742066_1242 (size=13187) 2024-12-13T21:31:16,099 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:16,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-13T21:31:16,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:16,100 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-13T21:31:16,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:16,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:16,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:16,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:16,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:16,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:16,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213286c14210bdf4a0097fdd4ccff0b2166_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125474768/Put/seqid=0 2024-12-13T21:31:16,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742068_1244 (size=12454) 2024-12-13T21:31:16,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:16,114 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213286c14210bdf4a0097fdd4ccff0b2166_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213286c14210bdf4a0097fdd4ccff0b2166_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:16,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/29f4a4c702cd41ada90ae8b5eb3b16e0, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:16,115 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/29f4a4c702cd41ada90ae8b5eb3b16e0 is 175, key is test_row_0/A:col10/1734125474768/Put/seqid=0 2024-12-13T21:31:16,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742069_1245 (size=31255) 2024-12-13T21:31:16,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-13T21:31:16,488 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#A#compaction#199 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:16,489 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/a21f8208164d461e942c509e75600e69 is 175, key is test_row_0/A:col10/1734125474133/Put/seqid=0 2024-12-13T21:31:16,492 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/d8acbf49f42a4572b7068a1c547d86fb as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/d8acbf49f42a4572b7068a1c547d86fb 2024-12-13T21:31:16,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742070_1246 (size=32141) 2024-12-13T21:31:16,497 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/a21f8208164d461e942c509e75600e69 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/a21f8208164d461e942c509e75600e69 2024-12-13T21:31:16,497 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/B of e2eceadaa1cf76613a4f5d367b5ca446 into d8acbf49f42a4572b7068a1c547d86fb(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:16,497 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:16,497 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/B, priority=13, startTime=1734125476045; duration=0sec 2024-12-13T21:31:16,497 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:16,497 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:B 2024-12-13T21:31:16,497 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:16,498 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:16,498 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/C is initiating minor compaction (all files) 2024-12-13T21:31:16,498 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/C in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:16,498 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/6da2ca33b4fc4d3dab5c2f386702782d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/beb8cdb647a94e86a37328b00e297707, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/a96ac905600b4ff2bb161c1500e7fe22] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=36.8 K 2024-12-13T21:31:16,499 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 6da2ca33b4fc4d3dab5c2f386702782d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1734125471810 2024-12-13T21:31:16,499 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting beb8cdb647a94e86a37328b00e297707, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1734125472961 2024-12-13T21:31:16,499 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a96ac905600b4ff2bb161c1500e7fe22, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1734125474133 2024-12-13T21:31:16,502 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/A of e2eceadaa1cf76613a4f5d367b5ca446 into a21f8208164d461e942c509e75600e69(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:16,502 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:16,502 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/A, priority=13, startTime=1734125476045; duration=0sec 2024-12-13T21:31:16,502 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:16,502 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:A 2024-12-13T21:31:16,517 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#C#compaction#201 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:16,518 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/751ee33c9ba9493a89a5e4d7ba9c152c is 50, key is test_row_0/C:col10/1734125474133/Put/seqid=0 2024-12-13T21:31:16,520 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=377, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/29f4a4c702cd41ada90ae8b5eb3b16e0 2024-12-13T21:31:16,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742071_1247 (size=13187) 2024-12-13T21:31:16,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/a967c109258e4bd1bc1bc9632b01896c is 50, key is test_row_0/B:col10/1734125474768/Put/seqid=0 2024-12-13T21:31:16,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742072_1248 (size=12301) 2024-12-13T21:31:16,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
as already flushing 2024-12-13T21:31:16,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:16,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:16,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 339 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125536924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:16,933 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/751ee33c9ba9493a89a5e4d7ba9c152c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/751ee33c9ba9493a89a5e4d7ba9c152c 2024-12-13T21:31:16,938 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/C of e2eceadaa1cf76613a4f5d367b5ca446 into 751ee33c9ba9493a89a5e4d7ba9c152c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:16,938 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:16,938 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/C, priority=13, startTime=1734125476045; duration=0sec 2024-12-13T21:31:16,938 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:16,938 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:C 2024-12-13T21:31:16,942 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/a967c109258e4bd1bc1bc9632b01896c 2024-12-13T21:31:16,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/634515d03c74490684726cbf59e1976e is 50, key is test_row_0/C:col10/1734125474768/Put/seqid=0 2024-12-13T21:31:16,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742073_1249 (size=12301) 2024-12-13T21:31:17,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:17,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 341 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125537026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:17,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61
2024-12-13T21:31:17,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:17,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 343 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36202 deadline: 1734125537228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:17,244 DEBUG [Thread-688 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6974f84e to 127.0.0.1:57927
2024-12-13T21:31:17,244 DEBUG [Thread-688 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T21:31:17,244 DEBUG [Thread-682 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x250a1de4 to 127.0.0.1:57927
2024-12-13T21:31:17,244 DEBUG [Thread-682 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T21:31:17,245 DEBUG [Thread-684 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49456175 to 127.0.0.1:57927
2024-12-13T21:31:17,245 DEBUG [Thread-684 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T21:31:17,246 DEBUG [Thread-686 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d919649 to 127.0.0.1:57927
2024-12-13T21:31:17,246 DEBUG [Thread-686 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T21:31:17,354 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-13T21:31:17,375 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/634515d03c74490684726cbf59e1976e 2024-12-13T21:31:17,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/29f4a4c702cd41ada90ae8b5eb3b16e0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/29f4a4c702cd41ada90ae8b5eb3b16e0 2024-12-13T21:31:17,388 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/29f4a4c702cd41ada90ae8b5eb3b16e0, entries=150, sequenceid=377, filesize=30.5 K 2024-12-13T21:31:17,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/a967c109258e4bd1bc1bc9632b01896c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/a967c109258e4bd1bc1bc9632b01896c 2024-12-13T21:31:17,393 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/a967c109258e4bd1bc1bc9632b01896c, entries=150, sequenceid=377, filesize=12.0 K 2024-12-13T21:31:17,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/634515d03c74490684726cbf59e1976e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/634515d03c74490684726cbf59e1976e 2024-12-13T21:31:17,397 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/634515d03c74490684726cbf59e1976e, entries=150, sequenceid=377, filesize=12.0 K 2024-12-13T21:31:17,398 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for e2eceadaa1cf76613a4f5d367b5ca446 in 1298ms, sequenceid=377, 
compaction requested=false 2024-12-13T21:31:17,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:17,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:17,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-12-13T21:31:17,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-12-13T21:31:17,400 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-13T21:31:17,400 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3660 sec 2024-12-13T21:31:17,401 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 2.3690 sec 2024-12-13T21:31:17,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:17,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-13T21:31:17,532 DEBUG [Thread-673 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x28c904d8 to 127.0.0.1:57927 2024-12-13T21:31:17,532 DEBUG [Thread-673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:17,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:17,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:17,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:17,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:17,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:17,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:17,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213d8b6064289804cee83057e7660955b2a_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125477531/Put/seqid=0 2024-12-13T21:31:17,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742074_1250 (size=12454) 2024-12-13T21:31:17,944 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:17,952 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213d8b6064289804cee83057e7660955b2a_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213d8b6064289804cee83057e7660955b2a_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:17,954 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/4f92688b80b34fb499474da7739f96bc, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:17,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/4f92688b80b34fb499474da7739f96bc is 175, key is test_row_0/A:col10/1734125477531/Put/seqid=0 2024-12-13T21:31:17,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742075_1251 (size=31255) 2024-12-13T21:31:18,361 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=410, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/4f92688b80b34fb499474da7739f96bc 2024-12-13T21:31:18,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/3cba472d520748fc8f3061cc58f031fe is 50, key is test_row_0/B:col10/1734125477531/Put/seqid=0 2024-12-13T21:31:18,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742076_1252 (size=12301) 2024-12-13T21:31:18,780 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/3cba472d520748fc8f3061cc58f031fe 2024-12-13T21:31:18,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/2e274643ea66418c936153f1d39c661b is 50, key is test_row_0/C:col10/1734125477531/Put/seqid=0 2024-12-13T21:31:18,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742077_1253 (size=12301) 2024-12-13T21:31:19,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-13T21:31:19,137 INFO 
[Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-13T21:31:19,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/2e274643ea66418c936153f1d39c661b 2024-12-13T21:31:19,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/4f92688b80b34fb499474da7739f96bc as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/4f92688b80b34fb499474da7739f96bc 2024-12-13T21:31:19,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/4f92688b80b34fb499474da7739f96bc, entries=150, sequenceid=410, filesize=30.5 K 2024-12-13T21:31:19,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/3cba472d520748fc8f3061cc58f031fe as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/3cba472d520748fc8f3061cc58f031fe 2024-12-13T21:31:19,213 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/3cba472d520748fc8f3061cc58f031fe, entries=150, sequenceid=410, filesize=12.0 K 2024-12-13T21:31:19,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/2e274643ea66418c936153f1d39c661b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/2e274643ea66418c936153f1d39c661b 2024-12-13T21:31:19,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/2e274643ea66418c936153f1d39c661b, entries=150, sequenceid=410, filesize=12.0 K 2024-12-13T21:31:19,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=0 B/0 for e2eceadaa1cf76613a4f5d367b5ca446 in 1687ms, sequenceid=410, compaction requested=true 2024-12-13T21:31:19,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:19,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:31:19,220 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:19,220 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:19,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:19,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:19,220 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:19,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e2eceadaa1cf76613a4f5d367b5ca446:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:19,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:19,221 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94651 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:19,221 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:19,221 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/B is initiating minor compaction (all files) 2024-12-13T21:31:19,221 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): e2eceadaa1cf76613a4f5d367b5ca446/A is initiating minor compaction (all files) 2024-12-13T21:31:19,221 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/B in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:19,221 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e2eceadaa1cf76613a4f5d367b5ca446/A in TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:19,221 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/d8acbf49f42a4572b7068a1c547d86fb, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/a967c109258e4bd1bc1bc9632b01896c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/3cba472d520748fc8f3061cc58f031fe] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=36.9 K 2024-12-13T21:31:19,221 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/a21f8208164d461e942c509e75600e69, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/29f4a4c702cd41ada90ae8b5eb3b16e0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/4f92688b80b34fb499474da7739f96bc] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp, totalSize=92.4 K 2024-12-13T21:31:19,221 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:19,221 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/a21f8208164d461e942c509e75600e69, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/29f4a4c702cd41ada90ae8b5eb3b16e0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/4f92688b80b34fb499474da7739f96bc] 2024-12-13T21:31:19,222 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting d8acbf49f42a4572b7068a1c547d86fb, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1734125474133 2024-12-13T21:31:19,222 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting a21f8208164d461e942c509e75600e69, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1734125474133 2024-12-13T21:31:19,222 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a967c109258e4bd1bc1bc9632b01896c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1734125474764 2024-12-13T21:31:19,222 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29f4a4c702cd41ada90ae8b5eb3b16e0, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1734125474764 2024-12-13T21:31:19,222 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cba472d520748fc8f3061cc58f031fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1734125476922 2024-12-13T21:31:19,222 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f92688b80b34fb499474da7739f96bc, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1734125476922 2024-12-13T21:31:19,229 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#B#compaction#207 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:19,229 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/7506d8fd99f343b4b7f1312b705f324c is 50, key is test_row_0/B:col10/1734125477531/Put/seqid=0 2024-12-13T21:31:19,233 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:19,236 DEBUG [Thread-671 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x748ab582 to 127.0.0.1:57927 2024-12-13T21:31:19,236 DEBUG [Thread-671 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:19,236 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241213b66b6185f7dc45148906c8927c482314_e2eceadaa1cf76613a4f5d367b5ca446 store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:19,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742078_1254 (size=13289) 2024-12-13T21:31:19,243 DEBUG [Thread-675 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x74be9bc0 to 127.0.0.1:57927 2024-12-13T21:31:19,243 DEBUG [Thread-675 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:19,251 DEBUG [Thread-679 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d5e0e3f to 127.0.0.1:57927 2024-12-13T21:31:19,251 DEBUG [Thread-679 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:19,264 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241213b66b6185f7dc45148906c8927c482314_e2eceadaa1cf76613a4f5d367b5ca446, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:19,265 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213b66b6185f7dc45148906c8927c482314_e2eceadaa1cf76613a4f5d367b5ca446 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:19,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742079_1255 (size=4469) 2024-12-13T21:31:19,290 DEBUG [Thread-677 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x776c0cb7 to 127.0.0.1:57927 2024-12-13T21:31:19,290 DEBUG [Thread-677 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 22
2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 228
2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 26
2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 18
2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 29
2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8190
2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8048
2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3471
2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10413 rows
2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3442
2024-12-13T21:31:19,290 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10326 rows
2024-12-13T21:31:19,290 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-13T21:31:19,290 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46dc1373 to 127.0.0.1:57927
2024-12-13T21:31:19,290 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T21:31:19,294 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-13T21:31:19,294 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees
2024-12-13T21:31:19,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-13T21:31:19,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63
2024-12-13T21:31:19,298 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125479298"}]},"ts":"1734125479298"}
2024-12-13T21:31:19,299 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-13T21:31:19,314 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-13T21:31:19,315 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-13T21:31:19,316 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2eceadaa1cf76613a4f5d367b5ca446, UNASSIGN}]
2024-12-13T21:31:19,317 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure
table=TestAcidGuarantees, region=e2eceadaa1cf76613a4f5d367b5ca446, UNASSIGN 2024-12-13T21:31:19,317 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=e2eceadaa1cf76613a4f5d367b5ca446, regionState=CLOSING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:31:19,318 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T21:31:19,318 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:31:19,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T21:31:19,469 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:19,470 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:19,470 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T21:31:19,470 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing e2eceadaa1cf76613a4f5d367b5ca446, disabling compactions & flushes 2024-12-13T21:31:19,470 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1942): waiting for 2 compactions to complete for region TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:19,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T21:31:19,644 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/7506d8fd99f343b4b7f1312b705f324c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/7506d8fd99f343b4b7f1312b705f324c 2024-12-13T21:31:19,651 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/B of e2eceadaa1cf76613a4f5d367b5ca446 into 7506d8fd99f343b4b7f1312b705f324c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:19,651 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:19,651 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/B, priority=13, startTime=1734125479220; duration=0sec 2024-12-13T21:31:19,651 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:19,651 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:B 2024-12-13T21:31:19,651 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. because compaction request was cancelled 2024-12-13T21:31:19,651 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:C 2024-12-13T21:31:19,670 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e2eceadaa1cf76613a4f5d367b5ca446#A#compaction#208 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:19,670 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/3b0950bb51584613adc0740042f670b1 is 175, key is test_row_0/A:col10/1734125477531/Put/seqid=0 2024-12-13T21:31:19,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742080_1256 (size=32243) 2024-12-13T21:31:19,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T21:31:20,088 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/3b0950bb51584613adc0740042f670b1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/3b0950bb51584613adc0740042f670b1 2024-12-13T21:31:20,095 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e2eceadaa1cf76613a4f5d367b5ca446/A of e2eceadaa1cf76613a4f5d367b5ca446 into 3b0950bb51584613adc0740042f670b1(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:20,095 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:20,095 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446., storeName=e2eceadaa1cf76613a4f5d367b5ca446/A, priority=13, startTime=1734125479220; duration=0sec 2024-12-13T21:31:20,095 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:20,095 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 2024-12-13T21:31:20,095 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:20,095 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. after waiting 0 ms 2024-12-13T21:31:20,095 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e2eceadaa1cf76613a4f5d367b5ca446:A 2024-12-13T21:31:20,095 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
2024-12-13T21:31:20,095 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing e2eceadaa1cf76613a4f5d367b5ca446 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-13T21:31:20,095 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=A 2024-12-13T21:31:20,096 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:20,096 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=B 2024-12-13T21:31:20,096 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:20,096 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e2eceadaa1cf76613a4f5d367b5ca446, store=C 2024-12-13T21:31:20,096 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:20,100 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213b86ef51241904a77820fb79b90173d2c_e2eceadaa1cf76613a4f5d367b5ca446 is 50, key is test_row_0/A:col10/1734125479239/Put/seqid=0 2024-12-13T21:31:20,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742081_1257 (size=9914) 2024-12-13T21:31:20,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T21:31:20,505 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:20,513 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213b86ef51241904a77820fb79b90173d2c_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b86ef51241904a77820fb79b90173d2c_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:20,515 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/fb2396fe5e3e42efaacd65cd9c1ee968, store: [table=TestAcidGuarantees family=A region=e2eceadaa1cf76613a4f5d367b5ca446] 2024-12-13T21:31:20,516 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/fb2396fe5e3e42efaacd65cd9c1ee968 is 175, key is test_row_0/A:col10/1734125479239/Put/seqid=0 2024-12-13T21:31:20,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742082_1258 (size=22561) 2024-12-13T21:31:20,922 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=419, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/fb2396fe5e3e42efaacd65cd9c1ee968 2024-12-13T21:31:20,931 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/9036dce08fae42359c143d713621e353 is 50, key is test_row_0/B:col10/1734125479239/Put/seqid=0 2024-12-13T21:31:20,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742083_1259 (size=9857) 2024-12-13T21:31:21,337 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/9036dce08fae42359c143d713621e353 2024-12-13T21:31:21,351 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/fc1f045833164fd8bf1b59780e60c480 is 50, key is test_row_0/C:col10/1734125479239/Put/seqid=0 2024-12-13T21:31:21,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742084_1260 (size=9857) 2024-12-13T21:31:21,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T21:31:21,756 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/fc1f045833164fd8bf1b59780e60c480 2024-12-13T21:31:21,763 DEBUG 
[RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/A/fb2396fe5e3e42efaacd65cd9c1ee968 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/fb2396fe5e3e42efaacd65cd9c1ee968 2024-12-13T21:31:21,768 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/fb2396fe5e3e42efaacd65cd9c1ee968, entries=100, sequenceid=419, filesize=22.0 K 2024-12-13T21:31:21,770 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/B/9036dce08fae42359c143d713621e353 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/9036dce08fae42359c143d713621e353 2024-12-13T21:31:21,775 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/9036dce08fae42359c143d713621e353, entries=100, sequenceid=419, filesize=9.6 K 2024-12-13T21:31:21,776 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/.tmp/C/fc1f045833164fd8bf1b59780e60c480 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fc1f045833164fd8bf1b59780e60c480 2024-12-13T21:31:21,779 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fc1f045833164fd8bf1b59780e60c480, entries=100, sequenceid=419, filesize=9.6 K 2024-12-13T21:31:21,780 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for e2eceadaa1cf76613a4f5d367b5ca446 in 1685ms, sequenceid=419, compaction requested=true 2024-12-13T21:31:21,780 DEBUG [StoreCloser-TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/2220731e59f540df82a11dd3ebb24893, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/02c46d57e7764fb88ce406baaeeb2ef0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/1bc9242bbfbd4893adae6c6a141cb1f1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/39eff9ba4308430fb7d7ebaaae184a73, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/589862ff29c44d6991284757003c712d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/3a7563e785b14cff886ec3e8df0338c6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/4d55c51716f64dd3bc1bcc29980dcc62, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/6eb97e81da9c4c6caa71da5bf8f58001, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/26678b0a762a4a339182050018bab8b9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/082bb04e489d4ceb91a6b3db9159e852, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/713f3c8af006436bbe35dea4c573db60, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/b55a59b895524594a42c6a68344f1852, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/14815879ad3246919a8334490f2d14b3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/17d3793f2a7a4fb9bdc36467af46e7a6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd82675cf1904921bd1f4c88b3897dde, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/87db0466810e4ed5a7e2299b0a75ff18, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/8af207e3e838439387851f78a553298c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/463c0969185d461badf31a2c2bf9c243, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/9170df6d6ec049aeb80b5463bb48d0e1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/037320ef21e54560a6e8bd60b4f58293, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/33b7d6ca21d3467a8e5846f7cde12607, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/d32a4ddb994c4e8a93456a9c2f5656e1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/77796e1510b8406f82d9bbff526af4c8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd81fcb1b38e4d79abb624583d470111, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/691d4b5fbfb44cca8b0ca9534b63f0e2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/0972bc3c5c5349d794cf00fdfbfdfe9e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/a21f8208164d461e942c509e75600e69, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/29f4a4c702cd41ada90ae8b5eb3b16e0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/4f92688b80b34fb499474da7739f96bc] to archive 2024-12-13T21:31:21,781 DEBUG [StoreCloser-TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-13T21:31:21,783 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/39eff9ba4308430fb7d7ebaaae184a73 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/39eff9ba4308430fb7d7ebaaae184a73 2024-12-13T21:31:21,784 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/1bc9242bbfbd4893adae6c6a141cb1f1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/1bc9242bbfbd4893adae6c6a141cb1f1 2024-12-13T21:31:21,784 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/589862ff29c44d6991284757003c712d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/589862ff29c44d6991284757003c712d 2024-12-13T21:31:21,784 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/02c46d57e7764fb88ce406baaeeb2ef0 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/02c46d57e7764fb88ce406baaeeb2ef0 2024-12-13T21:31:21,784 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/2220731e59f540df82a11dd3ebb24893 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/2220731e59f540df82a11dd3ebb24893 2024-12-13T21:31:21,784 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/3a7563e785b14cff886ec3e8df0338c6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/3a7563e785b14cff886ec3e8df0338c6 2024-12-13T21:31:21,784 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/4d55c51716f64dd3bc1bcc29980dcc62 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/4d55c51716f64dd3bc1bcc29980dcc62 2024-12-13T21:31:21,784 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/6eb97e81da9c4c6caa71da5bf8f58001 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/6eb97e81da9c4c6caa71da5bf8f58001 2024-12-13T21:31:21,785 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/b55a59b895524594a42c6a68344f1852 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/b55a59b895524594a42c6a68344f1852 2024-12-13T21:31:21,785 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/14815879ad3246919a8334490f2d14b3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/14815879ad3246919a8334490f2d14b3 2024-12-13T21:31:21,785 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/26678b0a762a4a339182050018bab8b9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/26678b0a762a4a339182050018bab8b9 2024-12-13T21:31:21,785 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/082bb04e489d4ceb91a6b3db9159e852 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/082bb04e489d4ceb91a6b3db9159e852 2024-12-13T21:31:21,786 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/713f3c8af006436bbe35dea4c573db60 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/713f3c8af006436bbe35dea4c573db60 2024-12-13T21:31:21,786 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd82675cf1904921bd1f4c88b3897dde to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd82675cf1904921bd1f4c88b3897dde 2024-12-13T21:31:21,786 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/17d3793f2a7a4fb9bdc36467af46e7a6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/17d3793f2a7a4fb9bdc36467af46e7a6 2024-12-13T21:31:21,786 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/87db0466810e4ed5a7e2299b0a75ff18 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/87db0466810e4ed5a7e2299b0a75ff18 2024-12-13T21:31:21,787 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/8af207e3e838439387851f78a553298c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/8af207e3e838439387851f78a553298c 2024-12-13T21:31:21,787 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/9170df6d6ec049aeb80b5463bb48d0e1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/9170df6d6ec049aeb80b5463bb48d0e1 2024-12-13T21:31:21,787 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/037320ef21e54560a6e8bd60b4f58293 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/037320ef21e54560a6e8bd60b4f58293 2024-12-13T21:31:21,787 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/463c0969185d461badf31a2c2bf9c243 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/463c0969185d461badf31a2c2bf9c243 2024-12-13T21:31:21,788 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/d32a4ddb994c4e8a93456a9c2f5656e1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/d32a4ddb994c4e8a93456a9c2f5656e1 2024-12-13T21:31:21,788 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/33b7d6ca21d3467a8e5846f7cde12607 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/33b7d6ca21d3467a8e5846f7cde12607 2024-12-13T21:31:21,789 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd81fcb1b38e4d79abb624583d470111 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/dd81fcb1b38e4d79abb624583d470111 2024-12-13T21:31:21,789 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/77796e1510b8406f82d9bbff526af4c8 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/77796e1510b8406f82d9bbff526af4c8 2024-12-13T21:31:21,789 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/a21f8208164d461e942c509e75600e69 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/a21f8208164d461e942c509e75600e69 2024-12-13T21:31:21,789 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/691d4b5fbfb44cca8b0ca9534b63f0e2 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/691d4b5fbfb44cca8b0ca9534b63f0e2 2024-12-13T21:31:21,789 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/4f92688b80b34fb499474da7739f96bc to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/4f92688b80b34fb499474da7739f96bc 2024-12-13T21:31:21,789 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/29f4a4c702cd41ada90ae8b5eb3b16e0 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/29f4a4c702cd41ada90ae8b5eb3b16e0 2024-12-13T21:31:21,789 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/0972bc3c5c5349d794cf00fdfbfdfe9e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/0972bc3c5c5349d794cf00fdfbfdfe9e 2024-12-13T21:31:21,791 DEBUG [StoreCloser-TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/c6c7104348064a149cd5d7e4830d15d4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/6b5bd8750e184fb580374498f2ae20f1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/244a4c55882d4988957fcb3776a5f012, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/1c0d252fd85f4fe7bf9f614b11b70aa3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/8ceef386b8a840ffa83e060b3404a0c4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/f8705eb777ab47fd8f036986ee92a989, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/b22ada1f0b6e462f86ff619b665c9214, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/4010538c82e844b6a96c5096c5958354, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/3c6069a4fb534fe69e8a1fdc906f382a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/ca64a00e5de949578cd3340af2ec1eb2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/cfa2998590fe4fb291263658341459ce, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/e852c8ce008d44a3a5b7872bd49d3073, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/c4033c6021904c3f99f6fb5b4a26a445, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/544666aea4534882a61e0e9c69e4453a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/2a7075048c844ef491c3200b7be0f70e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/700efd06071f44b7bf5a6abe47e7de56, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/309bd8bd26cd43ceb08706f374e14a2a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/bcc1d271c6b44438ade7d84608170cb0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/5fa73bce40a846cdb94b05a45efd6836, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/0e03ad45bfd44fabb57aced6f672d242, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/03c39b041afe4009aaf3586f3cccc2bb, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/f3d1b79753894a51a886c14b45e24006, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/694e0de7c60243bebdbe345112c4dcf3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/6f4b04e7f38e4be1b8766a6f4ab498d4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/b53c900f1e1847c5a8eda642034b34a9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/d8acbf49f42a4572b7068a1c547d86fb, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/8004043723dd47f1b1a7e51e705d819a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/a967c109258e4bd1bc1bc9632b01896c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/3cba472d520748fc8f3061cc58f031fe] to archive 2024-12-13T21:31:21,792 DEBUG [StoreCloser-TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-13T21:31:21,794 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/244a4c55882d4988957fcb3776a5f012 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/244a4c55882d4988957fcb3776a5f012 2024-12-13T21:31:21,794 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/6b5bd8750e184fb580374498f2ae20f1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/6b5bd8750e184fb580374498f2ae20f1 2024-12-13T21:31:21,794 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/c6c7104348064a149cd5d7e4830d15d4 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/c6c7104348064a149cd5d7e4830d15d4 2024-12-13T21:31:21,794 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/4010538c82e844b6a96c5096c5958354 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/4010538c82e844b6a96c5096c5958354 2024-12-13T21:31:21,794 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/f8705eb777ab47fd8f036986ee92a989 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/f8705eb777ab47fd8f036986ee92a989 2024-12-13T21:31:21,794 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/8ceef386b8a840ffa83e060b3404a0c4 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/8ceef386b8a840ffa83e060b3404a0c4 2024-12-13T21:31:21,794 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/b22ada1f0b6e462f86ff619b665c9214 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/b22ada1f0b6e462f86ff619b665c9214 2024-12-13T21:31:21,795 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/1c0d252fd85f4fe7bf9f614b11b70aa3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/1c0d252fd85f4fe7bf9f614b11b70aa3 2024-12-13T21:31:21,796 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/3c6069a4fb534fe69e8a1fdc906f382a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/3c6069a4fb534fe69e8a1fdc906f382a 2024-12-13T21:31:21,796 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/ca64a00e5de949578cd3340af2ec1eb2 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/ca64a00e5de949578cd3340af2ec1eb2 2024-12-13T21:31:21,796 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/e852c8ce008d44a3a5b7872bd49d3073 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/e852c8ce008d44a3a5b7872bd49d3073 2024-12-13T21:31:21,796 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/cfa2998590fe4fb291263658341459ce to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/cfa2998590fe4fb291263658341459ce 2024-12-13T21:31:21,796 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/2a7075048c844ef491c3200b7be0f70e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/2a7075048c844ef491c3200b7be0f70e 2024-12-13T21:31:21,796 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/c4033c6021904c3f99f6fb5b4a26a445 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/c4033c6021904c3f99f6fb5b4a26a445 2024-12-13T21:31:21,797 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/544666aea4534882a61e0e9c69e4453a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/544666aea4534882a61e0e9c69e4453a 2024-12-13T21:31:21,797 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/700efd06071f44b7bf5a6abe47e7de56 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/700efd06071f44b7bf5a6abe47e7de56 2024-12-13T21:31:21,798 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/0e03ad45bfd44fabb57aced6f672d242 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/0e03ad45bfd44fabb57aced6f672d242 2024-12-13T21:31:21,798 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/5fa73bce40a846cdb94b05a45efd6836 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/5fa73bce40a846cdb94b05a45efd6836 2024-12-13T21:31:21,798 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/309bd8bd26cd43ceb08706f374e14a2a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/309bd8bd26cd43ceb08706f374e14a2a 2024-12-13T21:31:21,798 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/bcc1d271c6b44438ade7d84608170cb0 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/bcc1d271c6b44438ade7d84608170cb0 2024-12-13T21:31:21,798 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/03c39b041afe4009aaf3586f3cccc2bb to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/03c39b041afe4009aaf3586f3cccc2bb 2024-12-13T21:31:21,798 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/f3d1b79753894a51a886c14b45e24006 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/f3d1b79753894a51a886c14b45e24006 2024-12-13T21:31:21,798 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/694e0de7c60243bebdbe345112c4dcf3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/694e0de7c60243bebdbe345112c4dcf3 2024-12-13T21:31:21,799 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/6f4b04e7f38e4be1b8766a6f4ab498d4 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/6f4b04e7f38e4be1b8766a6f4ab498d4 2024-12-13T21:31:21,799 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/8004043723dd47f1b1a7e51e705d819a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/8004043723dd47f1b1a7e51e705d819a 2024-12-13T21:31:21,799 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/b53c900f1e1847c5a8eda642034b34a9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/b53c900f1e1847c5a8eda642034b34a9 2024-12-13T21:31:21,799 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/d8acbf49f42a4572b7068a1c547d86fb to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/d8acbf49f42a4572b7068a1c547d86fb 2024-12-13T21:31:21,799 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/3cba472d520748fc8f3061cc58f031fe to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/3cba472d520748fc8f3061cc58f031fe 2024-12-13T21:31:21,800 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/a967c109258e4bd1bc1bc9632b01896c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/a967c109258e4bd1bc1bc9632b01896c 2024-12-13T21:31:21,801 DEBUG [StoreCloser-TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/48289310d9d04cd889b68fd3694e44ad, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/1b5cfdaad07d4a399b9d370b4efa8c91, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/369ad4107d984985bd4ac7b7fc15895e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/98cca9fc87bd4cdd91692a8e21edd0bb, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/4fdce5a31de144cabe6948ca5e0cf9e2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/ea75597592d442fbacf850c34858103b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/3c683a97cfc847d9822308373696c51c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/f7ef92097dcd46c7bc63b660403ca1ef, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/6b56cd3a22754246b15e16c7c9bf0309, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fda8704ad48b4395b52fc34fb32baa56, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/17fd9a0129f84ef3a88133295f60d7fc, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/0414003848ad44b68dab741039c3aa1b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/c29e8072ebf241b282324e9478833521, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fd632df9169142e498a34666497f2a44, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/83c08d72f1bd43c19be2b28e50da4334, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/5a7bac31882f42889cca441ed7263730, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/d09ea29e250a4d6ca307172687ec9233, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/46d51db89c7d442b9a492b974dddf3a2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/1f4746df1b114d81af3312644d6c4cdf, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/02c0c793877c4c7091a09bdf9e008cf9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/69df6b3f42ea495b976921f22977bf37, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/8aba03f8e18947a4a71fb959183e1987, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/6da2ca33b4fc4d3dab5c2f386702782d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/c5be0f72926e45a69bc3a18a7a4d42ba, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/beb8cdb647a94e86a37328b00e297707, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/a96ac905600b4ff2bb161c1500e7fe22] to archive 2024-12-13T21:31:21,801 DEBUG [StoreCloser-TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
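The HFileArchiver entries above and below record the same pattern for each of the three column families (A, B, C): every compacted store file under <rootDir>/data/default/TestAcidGuarantees/<region>/<family>/ is renamed to the mirrored location under <rootDir>/archive/data/default/TestAcidGuarantees/<region>/<family>/, keeping the file name and directory layout. A minimal Java sketch of that rename pattern using plain Hadoop FileSystem calls is below; the class name ArchiveSketch and method archiveStoreFiles are illustrative only and are not HBase's actual HFileArchiver implementation.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: mimics the rename pattern recorded by the HFileArchiver
// entries in this log, not HBase's actual archiver code.
public class ArchiveSketch {
  static void archiveStoreFiles(FileSystem fs, Path rootDir, List<Path> storeFiles)
      throws IOException {
    String dataPrefix = rootDir + "/data/";   // assumption: all inputs live under <rootDir>/data/
    for (Path src : storeFiles) {
      String srcStr = src.toString();
      if (!srcStr.startsWith(dataPrefix)) {
        throw new IOException("Unexpected store file location: " + src);
      }
      // Mirror the data layout under <rootDir>/archive/data/... and move the file.
      Path dst = new Path(rootDir + "/archive/data/" + srcStr.substring(dataPrefix.length()));
      fs.mkdirs(dst.getParent());
      if (!fs.rename(src, dst)) {
        throw new IOException("Failed to archive " + src + " to " + dst);
      }
    }
  }
}
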
2024-12-13T21:31:21,803 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/369ad4107d984985bd4ac7b7fc15895e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/369ad4107d984985bd4ac7b7fc15895e 2024-12-13T21:31:21,803 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/1b5cfdaad07d4a399b9d370b4efa8c91 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/1b5cfdaad07d4a399b9d370b4efa8c91 2024-12-13T21:31:21,803 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/ea75597592d442fbacf850c34858103b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/ea75597592d442fbacf850c34858103b 2024-12-13T21:31:21,803 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/98cca9fc87bd4cdd91692a8e21edd0bb to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/98cca9fc87bd4cdd91692a8e21edd0bb 2024-12-13T21:31:21,803 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/48289310d9d04cd889b68fd3694e44ad to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/48289310d9d04cd889b68fd3694e44ad 2024-12-13T21:31:21,804 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/4fdce5a31de144cabe6948ca5e0cf9e2 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/4fdce5a31de144cabe6948ca5e0cf9e2 2024-12-13T21:31:21,804 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/3c683a97cfc847d9822308373696c51c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/3c683a97cfc847d9822308373696c51c 2024-12-13T21:31:21,804 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/f7ef92097dcd46c7bc63b660403ca1ef to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/f7ef92097dcd46c7bc63b660403ca1ef 2024-12-13T21:31:21,805 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/6b56cd3a22754246b15e16c7c9bf0309 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/6b56cd3a22754246b15e16c7c9bf0309 2024-12-13T21:31:21,805 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fda8704ad48b4395b52fc34fb32baa56 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fda8704ad48b4395b52fc34fb32baa56 2024-12-13T21:31:21,805 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/c29e8072ebf241b282324e9478833521 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/c29e8072ebf241b282324e9478833521 2024-12-13T21:31:21,805 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/17fd9a0129f84ef3a88133295f60d7fc to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/17fd9a0129f84ef3a88133295f60d7fc 2024-12-13T21:31:21,806 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/0414003848ad44b68dab741039c3aa1b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/0414003848ad44b68dab741039c3aa1b 2024-12-13T21:31:21,806 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fd632df9169142e498a34666497f2a44 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fd632df9169142e498a34666497f2a44 2024-12-13T21:31:21,806 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/83c08d72f1bd43c19be2b28e50da4334 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/83c08d72f1bd43c19be2b28e50da4334 2024-12-13T21:31:21,806 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/5a7bac31882f42889cca441ed7263730 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/5a7bac31882f42889cca441ed7263730 2024-12-13T21:31:21,807 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/d09ea29e250a4d6ca307172687ec9233 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/d09ea29e250a4d6ca307172687ec9233 2024-12-13T21:31:21,807 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/46d51db89c7d442b9a492b974dddf3a2 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/46d51db89c7d442b9a492b974dddf3a2 2024-12-13T21:31:21,807 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/1f4746df1b114d81af3312644d6c4cdf to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/1f4746df1b114d81af3312644d6c4cdf 2024-12-13T21:31:21,807 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/02c0c793877c4c7091a09bdf9e008cf9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/02c0c793877c4c7091a09bdf9e008cf9 2024-12-13T21:31:21,807 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/8aba03f8e18947a4a71fb959183e1987 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/8aba03f8e18947a4a71fb959183e1987 2024-12-13T21:31:21,807 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/6da2ca33b4fc4d3dab5c2f386702782d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/6da2ca33b4fc4d3dab5c2f386702782d 2024-12-13T21:31:21,807 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/69df6b3f42ea495b976921f22977bf37 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/69df6b3f42ea495b976921f22977bf37 2024-12-13T21:31:21,808 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/c5be0f72926e45a69bc3a18a7a4d42ba to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/c5be0f72926e45a69bc3a18a7a4d42ba 2024-12-13T21:31:21,808 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/beb8cdb647a94e86a37328b00e297707 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/beb8cdb647a94e86a37328b00e297707 2024-12-13T21:31:21,808 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/a96ac905600b4ff2bb161c1500e7fe22 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/a96ac905600b4ff2bb161c1500e7fe22 2024-12-13T21:31:21,812 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/recovered.edits/422.seqid, newMaxSeqId=422, maxSeqId=4 2024-12-13T21:31:21,813 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446. 
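The HFileArchiver entries above all follow the same pattern: each store file is moved from the region's data directory to the matching location under the cluster's archive root, keeping the data/<namespace>/<table>/<region>/<family>/<file> layout intact. As a hedged illustration of that path mapping only (the class and method names below are invented for the sketch and are not HBase internals), the transformation visible in these log lines can be reproduced like this:

    // Sketch: derive the archive location that corresponds to a store file path,
    // mirroring the data/... -> archive/data/... moves logged above.
    // Assumes the HBase root dir is the path prefix that ends just before "/data/".
    import org.apache.hadoop.fs.Path;

    public final class ArchivePathSketch {
        static Path toArchivePath(Path rootDir, Path storeFile) {
            // Relative part, e.g. "data/default/TestAcidGuarantees/<region>/C/<file>"
            String relative = storeFile.toString().substring(rootDir.toString().length() + 1);
            return new Path(new Path(rootDir, "archive"), relative);
        }

        public static void main(String[] args) {
            Path root = new Path("hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05");
            Path store = new Path(root,
                "data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/f7ef92097dcd46c7bc63b660403ca1ef");
            // Prints the same archive destination shown for this file in the log above.
            System.out.println(toArchivePath(root, store));
        }
    }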
2024-12-13T21:31:21,813 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for e2eceadaa1cf76613a4f5d367b5ca446: 2024-12-13T21:31:21,814 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:21,814 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=e2eceadaa1cf76613a4f5d367b5ca446, regionState=CLOSED 2024-12-13T21:31:21,816 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-13T21:31:21,816 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure e2eceadaa1cf76613a4f5d367b5ca446, server=fd052dae32be,38989,1734125418878 in 2.4970 sec 2024-12-13T21:31:21,818 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-12-13T21:31:21,818 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e2eceadaa1cf76613a4f5d367b5ca446, UNASSIGN in 2.5000 sec 2024-12-13T21:31:21,819 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-13T21:31:21,819 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.5030 sec 2024-12-13T21:31:21,820 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125481820"}]},"ts":"1734125481820"} 2024-12-13T21:31:21,820 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-13T21:31:21,864 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-13T21:31:21,866 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.5710 sec 2024-12-13T21:31:23,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T21:31:23,409 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-13T21:31:23,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-12-13T21:31:23,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:23,413 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:23,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-13T21:31:23,414 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=67, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:23,418 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,421 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/recovered.edits] 2024-12-13T21:31:23,425 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/fb2396fe5e3e42efaacd65cd9c1ee968 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/fb2396fe5e3e42efaacd65cd9c1ee968 2024-12-13T21:31:23,425 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/3b0950bb51584613adc0740042f670b1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/A/3b0950bb51584613adc0740042f670b1 2024-12-13T21:31:23,428 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/7506d8fd99f343b4b7f1312b705f324c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/7506d8fd99f343b4b7f1312b705f324c 2024-12-13T21:31:23,428 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/9036dce08fae42359c143d713621e353 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/B/9036dce08fae42359c143d713621e353 2024-12-13T21:31:23,431 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/2e274643ea66418c936153f1d39c661b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/2e274643ea66418c936153f1d39c661b 2024-12-13T21:31:23,431 
DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/751ee33c9ba9493a89a5e4d7ba9c152c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/751ee33c9ba9493a89a5e4d7ba9c152c 2024-12-13T21:31:23,431 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/634515d03c74490684726cbf59e1976e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/634515d03c74490684726cbf59e1976e 2024-12-13T21:31:23,431 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fc1f045833164fd8bf1b59780e60c480 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/C/fc1f045833164fd8bf1b59780e60c480 2024-12-13T21:31:23,433 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/recovered.edits/422.seqid to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446/recovered.edits/422.seqid 2024-12-13T21:31:23,433 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,433 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-13T21:31:23,433 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-13T21:31:23,434 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-13T21:31:23,441 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412131934b79d87ad4e2b88363ff6bace0d91_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412131934b79d87ad4e2b88363ff6bace0d91_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,441 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213276e871e23b14a5a9cd571b8af635e55_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213276e871e23b14a5a9cd571b8af635e55_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,441 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121307d42bf8144f46b29c01f16722da9787_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121307d42bf8144f46b29c01f16722da9787_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,441 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412131776138d84cd4cd882d629f4bf4b9aa4_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412131776138d84cd4cd882d629f4bf4b9aa4_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,441 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213286c14210bdf4a0097fdd4ccff0b2166_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213286c14210bdf4a0097fdd4ccff0b2166_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,441 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412132dea0deb58514a7e8c213d9831b3ac95_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412132dea0deb58514a7e8c213d9831b3ac95_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,441 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121353818eb46d6d4897851670fbebe1f17c_e2eceadaa1cf76613a4f5d367b5ca446 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121353818eb46d6d4897851670fbebe1f17c_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,442 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412133a756c91c72e4a0aa2a6e468e68eeafa_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412133a756c91c72e4a0aa2a6e468e68eeafa_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,442 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b0e7598186c146389b76ea9583fb66cf_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b0e7598186c146389b76ea9583fb66cf_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,442 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b86ef51241904a77820fb79b90173d2c_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b86ef51241904a77820fb79b90173d2c_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,442 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121358cee122f5b7471d8aa02375531ff178_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121358cee122f5b7471d8aa02375531ff178_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,443 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b10f3baef3304e4b8c357ae140a3a776_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b10f3baef3304e4b8c357ae140a3a776_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,443 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213ae3be48e531e405b8a36b5a13b3d7785_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213ae3be48e531e405b8a36b5a13b3d7785_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,443 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412135e7f73c12b16403cae35a1735d75d12e_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412135e7f73c12b16403cae35a1735d75d12e_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,443 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213be715aae1aea439c99d498989e3ac18f_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213be715aae1aea439c99d498989e3ac18f_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,443 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b9891c312dd14b149bbe8f5f6c025d18_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b9891c312dd14b149bbe8f5f6c025d18_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,443 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213c8b237b75777419b8567ce7451664a7a_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213c8b237b75777419b8567ce7451664a7a_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,444 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213cee8f5691d0e4b6a90644dc9ec9557d8_e2eceadaa1cf76613a4f5d367b5ca446 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213cee8f5691d0e4b6a90644dc9ec9557d8_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,444 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213e571814190f044c79467205159c63da5_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213e571814190f044c79467205159c63da5_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,444 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213f3f0255180474edb8334cd5b9024789f_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213f3f0255180474edb8334cd5b9024789f_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,444 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213d8b6064289804cee83057e7660955b2a_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213d8b6064289804cee83057e7660955b2a_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,444 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213ed74de8ba2e9469e843d589220ac293a_e2eceadaa1cf76613a4f5d367b5ca446 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213ed74de8ba2e9469e843d589220ac293a_e2eceadaa1cf76613a4f5d367b5ca446 2024-12-13T21:31:23,444 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-13T21:31:23,446 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:23,448 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-13T21:31:23,450 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
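The archiving and hbase:meta edits above (and the descriptor/state cleanup that follows) are the server-side steps of DisableTableProcedure pid=63 and DeleteTableProcedure pid=67. From the client that drove them (the "Time-limited test" thread connecting as jenkins), the whole flow reduces to two Admin calls; the sketch below uses the public HBase 2.x client API for that sequence and is an illustration, not the test's own code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName tn = TableName.valueOf("TestAcidGuarantees");
                if (admin.tableExists(tn)) {
                    if (admin.isTableEnabled(tn)) {
                        admin.disableTable(tn); // master runs DisableTableProcedure (pid=63 above)
                    }
                    admin.deleteTable(tn);      // master runs DeleteTableProcedure (pid=67 above)
                }
            }
        }
    }

Both calls block until the corresponding master procedure finishes, which is why the test thread only logs "Operation: DISABLE ... completed" and "Operation: DELETE ... completed" after the procedures report SUCCESS.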
2024-12-13T21:31:23,451 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:23,451 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-13T21:31:23,451 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734125483451"}]},"ts":"9223372036854775807"} 2024-12-13T21:31:23,453 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-13T21:31:23,453 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => e2eceadaa1cf76613a4f5d367b5ca446, NAME => 'TestAcidGuarantees,,1734125453966.e2eceadaa1cf76613a4f5d367b5ca446.', STARTKEY => '', ENDKEY => ''}] 2024-12-13T21:31:23,453 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-13T21:31:23,454 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734125483453"}]},"ts":"9223372036854775807"} 2024-12-13T21:31:23,456 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-13T21:31:23,498 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:23,499 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 88 msec 2024-12-13T21:31:23,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-13T21:31:23,516 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-13T21:31:23,529 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=248 (was 247) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/cluster_6d61a109-3f65-6911-b45f-66524476d70f/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1126771990_22 at /127.0.0.1:42692 [Waiting for operation #676] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1a289b3d-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_656307843_22 at /127.0.0.1:55354 [Waiting for operation #553] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_656307843_22 at /127.0.0.1:56258 [Waiting for operation #1005] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1a289b3d-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1126771990_22 at /127.0.0.1:55344 [Waiting for operation #501] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1a289b3d-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/cluster_6d61a109-3f65-6911-b45f-66524476d70f/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1a289b3d-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=465 (was 458) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=397 (was 269) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2635 (was 2825) 2024-12-13T21:31:23,540 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=248, OpenFileDescriptor=465, MaxFileDescriptor=1048576, SystemLoadAverage=397, ProcessCount=11, AvailableMemoryMB=2635 2024-12-13T21:31:23,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-13T21:31:23,542 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T21:31:23,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:23,544 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T21:31:23,544 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:23,545 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-12-13T21:31:23,545 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T21:31:23,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-13T21:31:23,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742085_1261 (size=963) 2024-12-13T21:31:23,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-13T21:31:23,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-13T21:31:23,958 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05 2024-12-13T21:31:23,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742086_1262 (size=53) 2024-12-13T21:31:24,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-13T21:31:24,370 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:31:24,371 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 3c1b6e03dacebdc2f9aa13c07eb1be8e, disabling compactions & flushes 2024-12-13T21:31:24,371 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:24,371 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:24,371 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. after waiting 0 ms 2024-12-13T21:31:24,371 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:24,371 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
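The create request logged above builds TestAcidGuarantees with three column families (A, B and C), VERSIONS => '1', and the table-level attribute hbase.hregion.compacting.memstore.type => 'ADAPTIVE'. A client-side equivalent using the public HBase 2.x descriptor builders might look like the sketch below; treating the 131072-byte memstore flush size from the earlier TableDescriptorChecker warning as part of the table descriptor is an assumption, since the log does not show where that value is set:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder table =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                        // Table attribute shown in the TABLE_ATTRIBUTES/METADATA block above.
                        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                        // 128 KB flush size; assumption based on the "too small" warning above.
                        .setMemStoreFlushSize(131072L);
                for (String family : new String[] { "A", "B", "C" }) {
                    table.setColumnFamily(
                        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                            .setMaxVersions(1) // VERSIONS => '1' in the logged schema
                            .build());
                }
                admin.createTable(table.build()); // master runs CreateTableProcedure (pid=68 above)
            }
        }
    }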
2024-12-13T21:31:24,371 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:24,374 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T21:31:24,375 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734125484374"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734125484374"}]},"ts":"1734125484374"} 2024-12-13T21:31:24,377 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-13T21:31:24,378 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T21:31:24,378 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125484378"}]},"ts":"1734125484378"} 2024-12-13T21:31:24,379 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-13T21:31:24,432 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c1b6e03dacebdc2f9aa13c07eb1be8e, ASSIGN}] 2024-12-13T21:31:24,435 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c1b6e03dacebdc2f9aa13c07eb1be8e, ASSIGN 2024-12-13T21:31:24,436 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c1b6e03dacebdc2f9aa13c07eb1be8e, ASSIGN; state=OFFLINE, location=fd052dae32be,38989,1734125418878; forceNewPlan=false, retain=false 2024-12-13T21:31:24,501 DEBUG [master/fd052dae32be:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 3a53a56d105c8fa6ce8789bb1b9d7a71 changed from -1.0 to 0.0, refreshing cache 2024-12-13T21:31:24,587 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=3c1b6e03dacebdc2f9aa13c07eb1be8e, regionState=OPENING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:31:24,590 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure 3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:31:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-13T21:31:24,743 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:24,746 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): Open 
TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:24,746 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} 2024-12-13T21:31:24,746 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:24,746 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:31:24,746 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:24,747 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:24,748 INFO [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:24,749 INFO [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:31:24,749 INFO [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c1b6e03dacebdc2f9aa13c07eb1be8e columnFamilyName A 2024-12-13T21:31:24,749 DEBUG [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:24,749 INFO [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] regionserver.HStore(327): Store=3c1b6e03dacebdc2f9aa13c07eb1be8e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:31:24,749 INFO [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:24,750 INFO [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:31:24,751 INFO [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c1b6e03dacebdc2f9aa13c07eb1be8e columnFamilyName B 2024-12-13T21:31:24,751 DEBUG [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:24,751 INFO [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] regionserver.HStore(327): Store=3c1b6e03dacebdc2f9aa13c07eb1be8e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:31:24,751 INFO [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:24,752 INFO [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:31:24,752 INFO [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c1b6e03dacebdc2f9aa13c07eb1be8e columnFamilyName C 2024-12-13T21:31:24,752 DEBUG [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:24,753 INFO [StoreOpener-3c1b6e03dacebdc2f9aa13c07eb1be8e-1 {}] regionserver.HStore(327): 
Store=3c1b6e03dacebdc2f9aa13c07eb1be8e/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:31:24,753 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:24,754 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:24,754 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:24,756 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-13T21:31:24,757 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:24,760 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T21:31:24,760 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened 3c1b6e03dacebdc2f9aa13c07eb1be8e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67755126, jitterRate=0.009630054235458374}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T21:31:24,761 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:24,762 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., pid=70, masterSystemTime=1734125484743 2024-12-13T21:31:24,763 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:24,763 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
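[Editor's note] The remainder of this excerpt records the tail of the create-table procedure (pid=68) for TestAcidGuarantees, a client-requested flush of that table (pid=71/72), and a burst of RegionTooBusyException ("Over memstore limit") warnings while region 3c1b6e03dacebdc2f9aa13c07eb1be8e is flushing its A/B/C memstores. For orientation only, the sketch below shows roughly what a client driving this sequence could look like against the mini-cluster used here. It is a minimal illustration, not the TestAcidGuarantees test source: the table name, family names (A/B/C with ADAPTIVE in-memory compaction), row key, and ZooKeeper port 57927 are taken from the log, while the class name, cell value, retry count, and backoff are assumed. In practice the HBase client also retries RegionTooBusyException internally, so the explicit retry loop is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidGuaranteesClientSketch {
  public static void main(String[] args) throws Exception {
    // Assumed client configuration; the test above runs a local mini-cluster
    // with ZooKeeper on 127.0.0.1:57927.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "57927");

    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {

      // Three column families A/B/C with ADAPTIVE in-memory compaction, matching the
      // CompactingMemStore settings reported while the region opened above.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table);
      for (String family : new String[] {"A", "B", "C"}) {
        td.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build());
      }
      admin.createTable(td.build());

      // Writers back off when the region reports RegionTooBusyException
      // ("Over memstore limit"), as in the warnings that follow in this log.
      try (Table t = conn.getTable(table)) {
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        for (int attempt = 1; ; attempt++) {
          try {
            t.put(put);
            break;
          } catch (RegionTooBusyException busy) {
            if (attempt >= 5) throw busy;   // give up after a few attempts
            Thread.sleep(100L * attempt);   // simple linear backoff (illustrative)
          }
        }
      }

      // Equivalent of the "flush TestAcidGuarantees" request the master handles below.
      admin.flush(table);
    }
  }
}

Note the race visible later in the log: memstore pressure triggers a flush on the region server before the master-driven FlushRegionProcedure (pid=72) reaches it, so that procedure reports "NOT flushing ... as already flushing" and retries.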
2024-12-13T21:31:24,763 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=3c1b6e03dacebdc2f9aa13c07eb1be8e, regionState=OPEN, openSeqNum=2, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:31:24,766 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-13T21:31:24,766 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure 3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 in 174 msec 2024-12-13T21:31:24,767 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-12-13T21:31:24,767 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c1b6e03dacebdc2f9aa13c07eb1be8e, ASSIGN in 335 msec 2024-12-13T21:31:24,767 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T21:31:24,767 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125484767"}]},"ts":"1734125484767"} 2024-12-13T21:31:24,768 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-13T21:31:24,773 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T21:31:24,774 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2310 sec 2024-12-13T21:31:25,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-13T21:31:25,656 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-12-13T21:31:25,658 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0c8a18c7 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e0e280 2024-12-13T21:31:25,698 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67f02d8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:25,700 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:25,701 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42258, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:25,702 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-13T21:31:25,703 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43716, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-13T21:31:25,705 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x45426917 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@473477dd 2024-12-13T21:31:25,714 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21cebefa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:25,716 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e7fc60d to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a91dc80 2024-12-13T21:31:25,723 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e7c8846, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:25,725 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e66ea50 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6a874cc0 2024-12-13T21:31:25,731 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4093d76e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:25,732 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f50b381 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f6119e7 2024-12-13T21:31:25,739 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31178bc2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:25,740 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x124edab0 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7507573f 2024-12-13T21:31:25,748 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78439bc6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:25,750 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3e5c7476 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a2545d0 2024-12-13T21:31:25,757 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ab3f837, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:25,758 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1df84068 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d039dc2 2024-12-13T21:31:25,765 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2834a215, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:25,766 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x644774bd to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15db087a 2024-12-13T21:31:25,773 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@187234de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:25,774 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60cea876 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d1be4cd 2024-12-13T21:31:25,781 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2446f174, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:25,782 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10011701 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62ceb440 2024-12-13T21:31:25,790 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7190243c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:25,793 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:25,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-13T21:31:25,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T21:31:25,794 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:25,795 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:25,795 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:25,797 DEBUG [hconnection-0x63ae2784-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:25,797 DEBUG [hconnection-0x391a311d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:25,797 DEBUG [hconnection-0x47571b80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:25,797 DEBUG [hconnection-0x3e13eb80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:25,797 DEBUG [hconnection-0x2a871bba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:25,798 DEBUG [hconnection-0x48069a48-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:25,798 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42272, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:25,798 DEBUG [hconnection-0x3fdb6817-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:25,798 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42270, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:25,798 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42308, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:25,798 DEBUG [hconnection-0x30ec4335-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:25,799 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42268, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:25,799 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42340, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:25,799 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42274, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:25,799 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42326, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:25,800 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42322, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:25,807 DEBUG [hconnection-0x5b26abf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-12-13T21:31:25,809 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42342, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:25,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:25,809 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-13T21:31:25,810 DEBUG [hconnection-0x49d2fa99-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:25,813 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42358, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:25,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:25,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:25,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:25,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:25,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:25,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:25,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/09d0b5f1303e4bcf9ca735223d96c5da is 50, key is test_row_0/A:col10/1734125485808/Put/seqid=0 2024-12-13T21:31:25,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742087_1263 (size=12001) 2024-12-13T21:31:25,843 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/09d0b5f1303e4bcf9ca735223d96c5da 2024-12-13T21:31:25,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125545840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:25,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125545840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:25,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125545841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:25,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:25,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125545841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:25,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:25,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125545841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:25,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/678775fbd8d54c6db79d0d542b92d951 is 50, key is test_row_0/B:col10/1734125485808/Put/seqid=0 2024-12-13T21:31:25,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742088_1264 (size=12001) 2024-12-13T21:31:25,893 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/678775fbd8d54c6db79d0d542b92d951 2024-12-13T21:31:25,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T21:31:25,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/5c391935def94feea88045cd0a12d0b8 is 50, key is test_row_0/C:col10/1734125485808/Put/seqid=0 2024-12-13T21:31:25,946 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:25,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:25,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125545946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:25,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-13T21:31:25,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:25,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:25,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:25,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:25,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:25,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:25,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:25,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125545946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:25,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:25,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125545947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:25,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:25,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125545947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:25,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:25,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125545947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:25,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742089_1265 (size=12001) 2024-12-13T21:31:26,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T21:31:26,100 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,100 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-13T21:31:26,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:26,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:26,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:26,101 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:26,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:26,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:26,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125546148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125546151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125546151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125546152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125546152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,253 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-13T21:31:26,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:26,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:26,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:26,254 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:26,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:26,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:26,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/5c391935def94feea88045cd0a12d0b8 2024-12-13T21:31:26,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/09d0b5f1303e4bcf9ca735223d96c5da as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/09d0b5f1303e4bcf9ca735223d96c5da 2024-12-13T21:31:26,365 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/09d0b5f1303e4bcf9ca735223d96c5da, entries=150, sequenceid=15, filesize=11.7 K 2024-12-13T21:31:26,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/678775fbd8d54c6db79d0d542b92d951 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/678775fbd8d54c6db79d0d542b92d951 2024-12-13T21:31:26,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/678775fbd8d54c6db79d0d542b92d951, entries=150, sequenceid=15, 
filesize=11.7 K 2024-12-13T21:31:26,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/5c391935def94feea88045cd0a12d0b8 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/5c391935def94feea88045cd0a12d0b8 2024-12-13T21:31:26,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/5c391935def94feea88045cd0a12d0b8, entries=150, sequenceid=15, filesize=11.7 K 2024-12-13T21:31:26,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 566ms, sequenceid=15, compaction requested=false 2024-12-13T21:31:26,375 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-13T21:31:26,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:26,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T21:31:26,406 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,406 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-13T21:31:26,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:26,406 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-13T21:31:26,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:26,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:26,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:26,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:26,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:26,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:26,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/8f26ae181c454db2b04155d3e9b5bdce is 50, key is test_row_0/A:col10/1734125485839/Put/seqid=0 2024-12-13T21:31:26,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742090_1266 (size=12001) 2024-12-13T21:31:26,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:26,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:26,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125546458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125546460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125546461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125546462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125546462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125546563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125546564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125546564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125546564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125546565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125546766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125546767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125546767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125546767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:26,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125546768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:26,818 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/8f26ae181c454db2b04155d3e9b5bdce 2024-12-13T21:31:26,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/07c8002855934488b43dff5b49eaca7e is 50, key is test_row_0/B:col10/1734125485839/Put/seqid=0 2024-12-13T21:31:26,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742091_1267 (size=12001) 2024-12-13T21:31:26,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T21:31:27,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:27,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125547069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:27,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:27,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125547069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:27,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:27,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125547069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:27,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:27,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125547069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:27,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:27,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125547070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:27,238 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/07c8002855934488b43dff5b49eaca7e 2024-12-13T21:31:27,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/9f6a13cc515a4e91b76c3d29be03ed17 is 50, key is test_row_0/C:col10/1734125485839/Put/seqid=0 2024-12-13T21:31:27,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742092_1268 (size=12001) 2024-12-13T21:31:27,338 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-13T21:31:27,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:27,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125547573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:27,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:27,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125547574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:27,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:27,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125547575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:27,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:27,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:27,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125547576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:27,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125547575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:27,648 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/9f6a13cc515a4e91b76c3d29be03ed17 2024-12-13T21:31:27,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/8f26ae181c454db2b04155d3e9b5bdce as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8f26ae181c454db2b04155d3e9b5bdce 2024-12-13T21:31:27,663 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8f26ae181c454db2b04155d3e9b5bdce, entries=150, sequenceid=37, filesize=11.7 K 2024-12-13T21:31:27,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/07c8002855934488b43dff5b49eaca7e as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/07c8002855934488b43dff5b49eaca7e 2024-12-13T21:31:27,669 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/07c8002855934488b43dff5b49eaca7e, entries=150, sequenceid=37, filesize=11.7 K 2024-12-13T21:31:27,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/9f6a13cc515a4e91b76c3d29be03ed17 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/9f6a13cc515a4e91b76c3d29be03ed17 2024-12-13T21:31:27,674 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/9f6a13cc515a4e91b76c3d29be03ed17, entries=150, sequenceid=37, filesize=11.7 K 2024-12-13T21:31:27,675 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 1269ms, sequenceid=37, compaction requested=false 2024-12-13T21:31:27,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:27,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:27,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-13T21:31:27,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-13T21:31:27,678 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-13T21:31:27,678 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8820 sec 2024-12-13T21:31:27,681 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.8860 sec 2024-12-13T21:31:27,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T21:31:27,898 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-13T21:31:27,899 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:27,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-13T21:31:27,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-13T21:31:27,900 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:27,901 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:27,901 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:28,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-13T21:31:28,052 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-13T21:31:28,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:28,053 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-13T21:31:28,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:28,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:28,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:28,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:28,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:28,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:28,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/070ada7b9c644d16afe85b0f859b537c is 50, key is test_row_0/A:col10/1734125486457/Put/seqid=0 2024-12-13T21:31:28,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742093_1269 (size=12001) 2024-12-13T21:31:28,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-13T21:31:28,462 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/070ada7b9c644d16afe85b0f859b537c 2024-12-13T21:31:28,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/9ac176cab09f4d9ebb52d28306541910 is 50, key is test_row_0/B:col10/1734125486457/Put/seqid=0 2024-12-13T21:31:28,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742094_1270 (size=12001) 2024-12-13T21:31:28,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-13T21:31:28,578 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:28,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:28,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125548590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125548591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125548592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125548592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125548593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125548693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125548693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125548695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125548695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125548695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,889 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/9ac176cab09f4d9ebb52d28306541910 2024-12-13T21:31:28,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/1ccf5d8d82ce4685bec7fa1ba248b085 is 50, key is test_row_0/C:col10/1734125486457/Put/seqid=0 2024-12-13T21:31:28,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125548896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125548897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125548898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742095_1271 (size=12001) 2024-12-13T21:31:28,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125548898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:28,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:28,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125548899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-13T21:31:29,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125549198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125549201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125549202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125549203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125549203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,301 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/1ccf5d8d82ce4685bec7fa1ba248b085 2024-12-13T21:31:29,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/070ada7b9c644d16afe85b0f859b537c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/070ada7b9c644d16afe85b0f859b537c 2024-12-13T21:31:29,313 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/070ada7b9c644d16afe85b0f859b537c, entries=150, sequenceid=51, filesize=11.7 K 2024-12-13T21:31:29,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/9ac176cab09f4d9ebb52d28306541910 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/9ac176cab09f4d9ebb52d28306541910 2024-12-13T21:31:29,318 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/9ac176cab09f4d9ebb52d28306541910, entries=150, sequenceid=51, filesize=11.7 K 2024-12-13T21:31:29,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/1ccf5d8d82ce4685bec7fa1ba248b085 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/1ccf5d8d82ce4685bec7fa1ba248b085
2024-12-13T21:31:29,323 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/1ccf5d8d82ce4685bec7fa1ba248b085, entries=150, sequenceid=51, filesize=11.7 K
2024-12-13T21:31:29,324 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 1271ms, sequenceid=51, compaction requested=true
2024-12-13T21:31:29,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e:
2024-12-13T21:31:29,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.
2024-12-13T21:31:29,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74
2024-12-13T21:31:29,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=74
2024-12-13T21:31:29,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73
2024-12-13T21:31:29,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4250 sec
2024-12-13T21:31:29,329 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.4290 sec
2024-12-13T21:31:29,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e
2024-12-13T21:31:29,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-12-13T21:31:29,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A
2024-12-13T21:31:29,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:31:29,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B
2024-12-13T21:31:29,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:31:29,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK
3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:29,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:29,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/8d8dba3631bc4540bd0d4bf85d8ede36 is 50, key is test_row_0/A:col10/1734125488592/Put/seqid=0 2024-12-13T21:31:29,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125549713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125549713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125549714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125549715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125549715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742096_1272 (size=12001) 2024-12-13T21:31:29,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125549816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125549816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125549818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125549819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:29,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:29,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125549822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-13T21:31:30,005 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-13T21:31:30,006 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:30,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-13T21:31:30,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-13T21:31:30,007 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:30,008 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:30,008 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:30,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125550019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125550020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125550021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125550022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125550024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-13T21:31:30,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/8d8dba3631bc4540bd0d4bf85d8ede36 2024-12-13T21:31:30,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/1907deb90e0e462b8bddb379dd39fc46 is 50, key is test_row_0/B:col10/1734125488592/Put/seqid=0 2024-12-13T21:31:30,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742097_1273 (size=12001) 2024-12-13T21:31:30,159 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,159 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-13T21:31:30,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:30,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing
2024-12-13T21:31:30,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.
2024-12-13T21:31:30,160 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76
java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T21:31:30,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76
java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T21:31:30,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-13T21:31:30,312 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,312 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-13T21:31:30,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:30,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:30,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:30,312 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:30,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125550322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125550323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125550324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125550325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125550325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,466 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-13T21:31:30,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:30,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:30,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:30,467 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/1907deb90e0e462b8bddb379dd39fc46 2024-12-13T21:31:30,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/51df7499077c46a29781f837d39c167b is 50, key is test_row_0/C:col10/1734125488592/Put/seqid=0 2024-12-13T21:31:30,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742098_1274 (size=12001) 2024-12-13T21:31:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-13T21:31:30,619 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,619 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-13T21:31:30,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:30,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
as already flushing 2024-12-13T21:31:30,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:30,619 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,771 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-13T21:31:30,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:30,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:30,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:30,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125550828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125550829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125550830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125550831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:30,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125550831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,924 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:30,924 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-13T21:31:30,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:30,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:30,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:30,925 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:30,963 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/51df7499077c46a29781f837d39c167b 2024-12-13T21:31:30,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/8d8dba3631bc4540bd0d4bf85d8ede36 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8d8dba3631bc4540bd0d4bf85d8ede36 2024-12-13T21:31:30,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8d8dba3631bc4540bd0d4bf85d8ede36, entries=150, sequenceid=75, filesize=11.7 K 2024-12-13T21:31:30,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/1907deb90e0e462b8bddb379dd39fc46 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/1907deb90e0e462b8bddb379dd39fc46 2024-12-13T21:31:30,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/1907deb90e0e462b8bddb379dd39fc46, entries=150, sequenceid=75, 
filesize=11.7 K 2024-12-13T21:31:30,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/51df7499077c46a29781f837d39c167b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/51df7499077c46a29781f837d39c167b 2024-12-13T21:31:30,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/51df7499077c46a29781f837d39c167b, entries=150, sequenceid=75, filesize=11.7 K 2024-12-13T21:31:30,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 1272ms, sequenceid=75, compaction requested=true 2024-12-13T21:31:30,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:30,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:31:30,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:30,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:30,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:30,980 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:30,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:30,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:30,980 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:30,981 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:30,981 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:30,981 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 
3c1b6e03dacebdc2f9aa13c07eb1be8e/A is initiating minor compaction (all files) 2024-12-13T21:31:30,981 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/B is initiating minor compaction (all files) 2024-12-13T21:31:30,981 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/A in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:30,981 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/B in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:30,981 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/678775fbd8d54c6db79d0d542b92d951, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/07c8002855934488b43dff5b49eaca7e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/9ac176cab09f4d9ebb52d28306541910, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/1907deb90e0e462b8bddb379dd39fc46] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=46.9 K 2024-12-13T21:31:30,981 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/09d0b5f1303e4bcf9ca735223d96c5da, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8f26ae181c454db2b04155d3e9b5bdce, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/070ada7b9c644d16afe85b0f859b537c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8d8dba3631bc4540bd0d4bf85d8ede36] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=46.9 K 2024-12-13T21:31:30,982 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 678775fbd8d54c6db79d0d542b92d951, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734125485807 2024-12-13T21:31:30,982 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09d0b5f1303e4bcf9ca735223d96c5da, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734125485807 2024-12-13T21:31:30,982 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 07c8002855934488b43dff5b49eaca7e, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734125485833 2024-12-13T21:31:30,982 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f26ae181c454db2b04155d3e9b5bdce, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734125485833 2024-12-13T21:31:30,982 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ac176cab09f4d9ebb52d28306541910, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125486457 2024-12-13T21:31:30,982 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 070ada7b9c644d16afe85b0f859b537c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125486457 2024-12-13T21:31:30,983 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 1907deb90e0e462b8bddb379dd39fc46, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734125488591 2024-12-13T21:31:30,983 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d8dba3631bc4540bd0d4bf85d8ede36, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734125488591 2024-12-13T21:31:30,990 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#B#compaction#224 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:30,991 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/a3f904c7eaf847b584105502ce1d8f82 is 50, key is test_row_0/B:col10/1734125488592/Put/seqid=0 2024-12-13T21:31:30,993 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#A#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:30,993 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/c9453471af4e43bd95456dad04ce9f6f is 50, key is test_row_0/A:col10/1734125488592/Put/seqid=0 2024-12-13T21:31:30,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742099_1275 (size=12139) 2024-12-13T21:31:31,001 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/a3f904c7eaf847b584105502ce1d8f82 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/a3f904c7eaf847b584105502ce1d8f82 2024-12-13T21:31:31,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742100_1276 (size=12139) 2024-12-13T21:31:31,008 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/B of 3c1b6e03dacebdc2f9aa13c07eb1be8e into a3f904c7eaf847b584105502ce1d8f82(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:31,008 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:31,008 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/B, priority=12, startTime=1734125490980; duration=0sec 2024-12-13T21:31:31,008 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:31,008 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:B 2024-12-13T21:31:31,008 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:31,009 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:31,009 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/C is initiating minor compaction (all files) 2024-12-13T21:31:31,009 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/C in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:31,009 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/5c391935def94feea88045cd0a12d0b8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/9f6a13cc515a4e91b76c3d29be03ed17, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/1ccf5d8d82ce4685bec7fa1ba248b085, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/51df7499077c46a29781f837d39c167b] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=46.9 K 2024-12-13T21:31:31,009 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/c9453471af4e43bd95456dad04ce9f6f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/c9453471af4e43bd95456dad04ce9f6f 2024-12-13T21:31:31,010 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c391935def94feea88045cd0a12d0b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734125485807 2024-12-13T21:31:31,010 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f6a13cc515a4e91b76c3d29be03ed17, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734125485833 2024-12-13T21:31:31,010 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ccf5d8d82ce4685bec7fa1ba248b085, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125486457 2024-12-13T21:31:31,010 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 51df7499077c46a29781f837d39c167b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734125488591 2024-12-13T21:31:31,013 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/A of 3c1b6e03dacebdc2f9aa13c07eb1be8e into c9453471af4e43bd95456dad04ce9f6f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:31,013 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:31,013 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/A, priority=12, startTime=1734125490980; duration=0sec 2024-12-13T21:31:31,013 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:31,013 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:A 2024-12-13T21:31:31,018 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#C#compaction#226 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:31,018 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/640ae4cf66d346d3a8d4e2358761eb2a is 50, key is test_row_0/C:col10/1734125488592/Put/seqid=0 2024-12-13T21:31:31,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742101_1277 (size=12139) 2024-12-13T21:31:31,037 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/640ae4cf66d346d3a8d4e2358761eb2a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/640ae4cf66d346d3a8d4e2358761eb2a 2024-12-13T21:31:31,041 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/C of 3c1b6e03dacebdc2f9aa13c07eb1be8e into 640ae4cf66d346d3a8d4e2358761eb2a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
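[editor's note] The PressureAwareThroughputController lines above report each compaction's average throughput against a 50.00 MB/second total limit. The sketch below shows how that limiter is typically bounded through configuration; the hbase.hstore.compaction.throughput.* key names and the chosen values are assumptions to verify against the HBase version in use.

    // A minimal sketch, assuming the standard pressure-aware compaction throughput
    // controller settings. Values are in bytes per second and are examples only.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Upper and lower bounds the controller throttles between, depending on pressure.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 25L * 1024 * 1024);
        return conf;
      }
    }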
2024-12-13T21:31:31,041 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:31,041 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/C, priority=12, startTime=1734125490980; duration=0sec 2024-12-13T21:31:31,041 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:31,041 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:C 2024-12-13T21:31:31,076 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:31,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-13T21:31:31,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:31,077 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:31:31,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:31,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:31,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:31,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:31,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:31,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:31,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/6ad226e229534826bf599084ce52386a is 50, key is test_row_0/A:col10/1734125489713/Put/seqid=0 2024-12-13T21:31:31,084 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742102_1278 (size=12001) 2024-12-13T21:31:31,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-13T21:31:31,485 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/6ad226e229534826bf599084ce52386a 2024-12-13T21:31:31,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/4438af6778414d2293ff73c133b6cf71 is 50, key is test_row_0/B:col10/1734125489713/Put/seqid=0 2024-12-13T21:31:31,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742103_1279 (size=12001) 2024-12-13T21:31:31,501 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/4438af6778414d2293ff73c133b6cf71 2024-12-13T21:31:31,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/492c11c8304c492689fd7d836cec07a6 is 50, key is test_row_0/C:col10/1734125489713/Put/seqid=0 2024-12-13T21:31:31,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742104_1280 (size=12001) 2024-12-13T21:31:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:31,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:31,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:31,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125551844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:31,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:31,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125551845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:31,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125551846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:31,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:31,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125551846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125551846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:31,925 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/492c11c8304c492689fd7d836cec07a6 2024-12-13T21:31:31,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/6ad226e229534826bf599084ce52386a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6ad226e229534826bf599084ce52386a 2024-12-13T21:31:31,932 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6ad226e229534826bf599084ce52386a, entries=150, sequenceid=90, filesize=11.7 K 2024-12-13T21:31:31,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/4438af6778414d2293ff73c133b6cf71 as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/4438af6778414d2293ff73c133b6cf71 2024-12-13T21:31:31,936 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/4438af6778414d2293ff73c133b6cf71, entries=150, sequenceid=90, filesize=11.7 K 2024-12-13T21:31:31,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/492c11c8304c492689fd7d836cec07a6 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/492c11c8304c492689fd7d836cec07a6 2024-12-13T21:31:31,941 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/492c11c8304c492689fd7d836cec07a6, entries=150, sequenceid=90, filesize=11.7 K 2024-12-13T21:31:31,942 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 865ms, sequenceid=90, compaction requested=false 2024-12-13T21:31:31,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:31,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
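[editor's note] The WARN/DEBUG pairs above show client Mutate calls being rejected with RegionTooBusyException while the region is over its blocking memstore limit (512.0 K in this test run) and the flush recorded here is still completing. Below is a hedged client-side sketch of how such a rejection surfaces once the client's own retries are exhausted; the connection setup, table name, column family, and cell contents are assumptions for illustration only.

    // Illustrative client sketch. The HBase client normally retries
    // RegionTooBusyException internally; when retries run out the failure
    // surfaces as RetriesExhaustedWithDetailsException.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            table.put(put);
          } catch (RetriesExhaustedWithDetailsException e) {
            // Individual causes may be RegionTooBusyException instances like the
            // "Over memstore limit" ones logged above; back off and retry later.
            System.err.println("put failed after retries: " + e.getMessage());
          }
        }
      }
    }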
2024-12-13T21:31:31,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-13T21:31:31,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-13T21:31:31,944 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-13T21:31:31,944 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9350 sec 2024-12-13T21:31:31,945 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.9380 sec 2024-12-13T21:31:31,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-13T21:31:31,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:31,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:31,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:31,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:31,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:31,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:31,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:31,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/0749edbb26fa47df9d7c43496ecc1e29 is 50, key is test_row_0/A:col10/1734125491948/Put/seqid=0 2024-12-13T21:31:31,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:31,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125551952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:31,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:31,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125551953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:31,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:31,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125551955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:31,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:31,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125551955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:31,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:31,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125551956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:31,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742105_1281 (size=12001) 2024-12-13T21:31:32,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125552056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125552057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125552057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125552059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125552059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-13T21:31:32,111 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-13T21:31:32,112 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:32,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-13T21:31:32,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-13T21:31:32,115 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:32,116 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:32,116 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:32,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=77 2024-12-13T21:31:32,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125552259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125552262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125552263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125552263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125552263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,269 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-13T21:31:32,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:32,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:32,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:32,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:32,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:32,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:32,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/0749edbb26fa47df9d7c43496ecc1e29 2024-12-13T21:31:32,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/2a03a123e62b49ce8ba13b18567101c1 is 50, key is test_row_0/B:col10/1734125491948/Put/seqid=0 2024-12-13T21:31:32,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742106_1282 (size=12001) 2024-12-13T21:31:32,398 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/2a03a123e62b49ce8ba13b18567101c1 2024-12-13T21:31:32,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-13T21:31:32,421 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/bfc8afe5524f4a318a3e04ef8299d9df is 50, key is test_row_0/C:col10/1734125491948/Put/seqid=0 2024-12-13T21:31:32,423 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-13T21:31:32,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:32,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
as already flushing 2024-12-13T21:31:32,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:32,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:32,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:32,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:32,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742107_1283 (size=12001) 2024-12-13T21:31:32,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/bfc8afe5524f4a318a3e04ef8299d9df 2024-12-13T21:31:32,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/0749edbb26fa47df9d7c43496ecc1e29 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/0749edbb26fa47df9d7c43496ecc1e29 2024-12-13T21:31:32,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/0749edbb26fa47df9d7c43496ecc1e29, entries=150, sequenceid=116, filesize=11.7 K 2024-12-13T21:31:32,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/2a03a123e62b49ce8ba13b18567101c1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/2a03a123e62b49ce8ba13b18567101c1 
2024-12-13T21:31:32,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/2a03a123e62b49ce8ba13b18567101c1, entries=150, sequenceid=116, filesize=11.7 K 2024-12-13T21:31:32,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/bfc8afe5524f4a318a3e04ef8299d9df as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bfc8afe5524f4a318a3e04ef8299d9df 2024-12-13T21:31:32,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bfc8afe5524f4a318a3e04ef8299d9df, entries=150, sequenceid=116, filesize=11.7 K 2024-12-13T21:31:32,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 505ms, sequenceid=116, compaction requested=true 2024-12-13T21:31:32,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:32,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:31:32,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:32,454 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:32,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:32,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:32,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:32,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-13T21:31:32,455 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:32,456 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:32,456 DEBUG 
[RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:32,457 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/B is initiating minor compaction (all files) 2024-12-13T21:31:32,457 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/A is initiating minor compaction (all files) 2024-12-13T21:31:32,457 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/B in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:32,457 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/A in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:32,457 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/a3f904c7eaf847b584105502ce1d8f82, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/4438af6778414d2293ff73c133b6cf71, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/2a03a123e62b49ce8ba13b18567101c1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=35.3 K 2024-12-13T21:31:32,457 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/c9453471af4e43bd95456dad04ce9f6f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6ad226e229534826bf599084ce52386a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/0749edbb26fa47df9d7c43496ecc1e29] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=35.3 K 2024-12-13T21:31:32,458 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting c9453471af4e43bd95456dad04ce9f6f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734125488591 2024-12-13T21:31:32,458 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3f904c7eaf847b584105502ce1d8f82, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734125488591 2024-12-13T21:31:32,458 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ad226e229534826bf599084ce52386a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, 
earliestPutTs=1734125489709 2024-12-13T21:31:32,459 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4438af6778414d2293ff73c133b6cf71, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1734125489709 2024-12-13T21:31:32,459 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 0749edbb26fa47df9d7c43496ecc1e29, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734125491845 2024-12-13T21:31:32,459 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a03a123e62b49ce8ba13b18567101c1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734125491845 2024-12-13T21:31:32,472 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#A#compaction#233 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:32,472 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#B#compaction#234 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:32,473 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/c93b31c2c1214dbe96ee6e03c0b5974b is 50, key is test_row_0/B:col10/1734125491948/Put/seqid=0 2024-12-13T21:31:32,473 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/366b0965ea7741f782788ed9c788ece5 is 50, key is test_row_0/A:col10/1734125491948/Put/seqid=0 2024-12-13T21:31:32,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742108_1284 (size=12241) 2024-12-13T21:31:32,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742109_1285 (size=12241) 2024-12-13T21:31:32,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:32,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:31:32,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:32,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:32,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:32,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:32,565 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:32,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:32,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/038c7d3b81964c88b53a7d73553fff9d is 50, key is test_row_0/A:col10/1734125491955/Put/seqid=0 2024-12-13T21:31:32,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742110_1286 (size=12001) 2024-12-13T21:31:32,578 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-13T21:31:32,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:32,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:32,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:32,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
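The repeated "Over memstore limit=512.0 K" rejections above are raised by HRegion.checkResources() once a region's memstore grows past its blocking threshold, which HBase derives from the per-region flush size multiplied by a block multiplier. A minimal sketch of how that threshold comes out of the configuration (shown with the stock defaults as fallbacks; the tiny 512 K figure in this test run presumably comes from a test-specific override of these keys, which is an assumption, not something stated in the log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        // Loads hbase-default.xml / hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush threshold (default 128 MB)...
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
        // ...and the multiplier that turns it into the hard blocking limit
        // enforced by HRegion.checkResources().
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        long blockingLimit = flushSize * multiplier;
        System.out.println("Puts block with RegionTooBusyException above "
                + blockingLimit + " bytes per region");
    }
}
```

Until a flush brings the memstore back under that limit, every Mutate against the region is rejected, which is what the handler threads keep logging below.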
2024-12-13T21:31:32,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:32,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:32,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125552581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125552581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125552583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125552583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125552583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125552684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125552684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125552686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125552686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125552687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-13T21:31:32,730 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-13T21:31:32,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:32,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:32,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:32,731 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
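Each rejected Mutate logged here reaches the caller as an org.apache.hadoop.hbase.RegionTooBusyException. The stock HBase client already retries this exception internally, so the loop below is only an illustrative, hand-rolled sketch: table, row, family, and qualifier names mirror the cells seen in this log, while the cell value, retry count, and backoff numbers are hypothetical.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetry {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row/family/qualifier match the log ("test_row_0", family A, col10); the value is made up.
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;                        // write accepted
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) {
                        throw e;                  // give up after a few attempts
                    }
                    Thread.sleep(100L * attempt); // simple linear backoff while the region flushes
                }
            }
        }
    }
}
```

In practice the client-level retry settings handle this transparently; the sketch only shows where RegionTooBusyException would surface if those retries were exhausted.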
2024-12-13T21:31:32,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:32,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:32,882 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-13T21:31:32,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:32,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:32,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:32,883 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:32,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:32,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:32,885 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/c93b31c2c1214dbe96ee6e03c0b5974b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/c93b31c2c1214dbe96ee6e03c0b5974b 2024-12-13T21:31:32,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125552887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125552887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,889 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/B of 3c1b6e03dacebdc2f9aa13c07eb1be8e into c93b31c2c1214dbe96ee6e03c0b5974b(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:32,889 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:32,889 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/B, priority=13, startTime=1734125492454; duration=0sec 2024-12-13T21:31:32,889 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:32,889 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:B 2024-12-13T21:31:32,889 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:32,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125552889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,890 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:32,891 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/C is initiating minor compaction (all files) 2024-12-13T21:31:32,891 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/C in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:32,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,891 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/640ae4cf66d346d3a8d4e2358761eb2a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/492c11c8304c492689fd7d836cec07a6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bfc8afe5524f4a318a3e04ef8299d9df] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=35.3 K 2024-12-13T21:31:32,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125552890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:32,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125552890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:32,891 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 640ae4cf66d346d3a8d4e2358761eb2a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734125488591 2024-12-13T21:31:32,891 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 492c11c8304c492689fd7d836cec07a6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1734125489709 2024-12-13T21:31:32,892 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfc8afe5524f4a318a3e04ef8299d9df, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734125491845 2024-12-13T21:31:32,897 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#C#compaction#236 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:32,898 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/ce17dcd2bc8940348618adb9084e98a5 is 50, key is test_row_0/C:col10/1734125491948/Put/seqid=0 2024-12-13T21:31:32,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742111_1287 (size=12241) 2024-12-13T21:31:32,906 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/366b0965ea7741f782788ed9c788ece5 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/366b0965ea7741f782788ed9c788ece5 2024-12-13T21:31:32,911 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/A of 3c1b6e03dacebdc2f9aa13c07eb1be8e into 366b0965ea7741f782788ed9c788ece5(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:32,911 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:32,911 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/A, priority=13, startTime=1734125492454; duration=0sec 2024-12-13T21:31:32,911 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:32,911 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:A 2024-12-13T21:31:32,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/038c7d3b81964c88b53a7d73553fff9d 2024-12-13T21:31:32,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/0bf7b33121334c8ebeb4d886de04af39 is 50, key is test_row_0/B:col10/1734125491955/Put/seqid=0 2024-12-13T21:31:32,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742112_1288 (size=12001) 2024-12-13T21:31:33,035 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-13T21:31:33,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:33,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:33,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:33,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,187 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-13T21:31:33,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:33,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:33,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:33,188 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:33,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125553191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:33,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125553191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:33,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125553192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:33,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:33,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125553193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125553194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-13T21:31:33,305 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/ce17dcd2bc8940348618adb9084e98a5 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/ce17dcd2bc8940348618adb9084e98a5 2024-12-13T21:31:33,310 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/C of 3c1b6e03dacebdc2f9aa13c07eb1be8e into ce17dcd2bc8940348618adb9084e98a5(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:33,310 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:33,310 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/C, priority=13, startTime=1734125492455; duration=0sec 2024-12-13T21:31:33,310 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:33,310 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:C 2024-12-13T21:31:33,339 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-13T21:31:33,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:33,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:33,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:33,340 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:33,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/0bf7b33121334c8ebeb4d886de04af39 2024-12-13T21:31:33,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/537206e16ddc4463b7c41b29146f4677 is 50, key is test_row_0/C:col10/1734125491955/Put/seqid=0 2024-12-13T21:31:33,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742113_1289 (size=12001) 2024-12-13T21:31:33,492 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-13T21:31:33,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:33,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:33,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:33,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:33,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,645 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-13T21:31:33,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:33,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:33,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:33,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:33,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125553695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:33,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125553695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:33,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125553696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:33,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125553696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:33,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125553698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,794 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/537206e16ddc4463b7c41b29146f4677 2024-12-13T21:31:33,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/038c7d3b81964c88b53a7d73553fff9d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/038c7d3b81964c88b53a7d73553fff9d 2024-12-13T21:31:33,797 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-13T21:31:33,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:33,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
as already flushing 2024-12-13T21:31:33,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:33,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:33,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/038c7d3b81964c88b53a7d73553fff9d, entries=150, sequenceid=128, filesize=11.7 K 2024-12-13T21:31:33,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/0bf7b33121334c8ebeb4d886de04af39 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/0bf7b33121334c8ebeb4d886de04af39 2024-12-13T21:31:33,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/0bf7b33121334c8ebeb4d886de04af39, entries=150, sequenceid=128, filesize=11.7 K 2024-12-13T21:31:33,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:33,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/537206e16ddc4463b7c41b29146f4677 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/537206e16ddc4463b7c41b29146f4677 2024-12-13T21:31:33,812 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/537206e16ddc4463b7c41b29146f4677, entries=150, sequenceid=128, filesize=11.7 K 2024-12-13T21:31:33,813 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 1248ms, sequenceid=128, compaction requested=false 2024-12-13T21:31:33,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:33,959 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:33,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-13T21:31:33,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:33,959 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-13T21:31:33,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:33,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:33,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:33,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:33,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:33,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:33,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/604ca1af6efd431cba4dd078efa0b0b9 is 50, key is test_row_0/A:col10/1734125492580/Put/seqid=0 2024-12-13T21:31:33,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742114_1290 (size=12151) 2024-12-13T21:31:34,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-13T21:31:34,367 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/604ca1af6efd431cba4dd078efa0b0b9 2024-12-13T21:31:34,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/9ce9c513510849939e797d0370b56cbf is 50, key is test_row_0/B:col10/1734125492580/Put/seqid=0 2024-12-13T21:31:34,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742115_1291 (size=12151) 2024-12-13T21:31:34,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:34,699 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:34,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:4533) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:4464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.mutate(HRegion.java:4953) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.mutate(HRegion.java:4947) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.mutate(HRegion.java:4943) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3233) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:34,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125554706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:34,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:34,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125554712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:34,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:34,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125554728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:34,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:34,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125554728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:34,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:34,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125554728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:34,776 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/9ce9c513510849939e797d0370b56cbf 2024-12-13T21:31:34,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/6ec4a926873544cca888c98e4041c919 is 50, key is test_row_0/C:col10/1734125492580/Put/seqid=0 2024-12-13T21:31:34,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742116_1292 (size=12151) 2024-12-13T21:31:34,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:34,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125554831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:34,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:34,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125554831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:34,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:34,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125554831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:34,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:34,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125554836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125555034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125555034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125555034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125555038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,186 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/6ec4a926873544cca888c98e4041c919 2024-12-13T21:31:35,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/604ca1af6efd431cba4dd078efa0b0b9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/604ca1af6efd431cba4dd078efa0b0b9 2024-12-13T21:31:35,193 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/604ca1af6efd431cba4dd078efa0b0b9, entries=150, sequenceid=156, filesize=11.9 K 2024-12-13T21:31:35,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/9ce9c513510849939e797d0370b56cbf as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/9ce9c513510849939e797d0370b56cbf 2024-12-13T21:31:35,197 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/9ce9c513510849939e797d0370b56cbf, entries=150, sequenceid=156, filesize=11.9 K 2024-12-13T21:31:35,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/6ec4a926873544cca888c98e4041c919 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/6ec4a926873544cca888c98e4041c919 2024-12-13T21:31:35,201 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/6ec4a926873544cca888c98e4041c919, entries=150, sequenceid=156, filesize=11.9 K 2024-12-13T21:31:35,202 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 1243ms, sequenceid=156, compaction requested=true 2024-12-13T21:31:35,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:35,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:35,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-13T21:31:35,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-13T21:31:35,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-13T21:31:35,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0950 sec 2024-12-13T21:31:35,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 3.1020 sec 2024-12-13T21:31:35,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:35,338 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-13T21:31:35,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:35,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:35,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:35,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:35,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:35,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:35,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/10e63c1f0e954b7cb42ad36374736d85 is 50, key is test_row_0/A:col10/1734125495337/Put/seqid=0 2024-12-13T21:31:35,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742117_1293 (size=12151) 2024-12-13T21:31:35,348 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/10e63c1f0e954b7cb42ad36374736d85 2024-12-13T21:31:35,355 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/b7b89913c120470ea4a87db6017c23e8 is 50, key is test_row_0/B:col10/1734125495337/Put/seqid=0 2024-12-13T21:31:35,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125555354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125555355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742118_1294 (size=12151) 2024-12-13T21:31:35,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125555356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125555357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125555458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125555458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125555459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125555461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125555661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125555661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125555662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125555662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,759 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/b7b89913c120470ea4a87db6017c23e8 2024-12-13T21:31:35,767 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/02621a6bc36c46199999dcd187f46dd9 is 50, key is test_row_0/C:col10/1734125495337/Put/seqid=0 2024-12-13T21:31:35,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742119_1295 (size=12151) 2024-12-13T21:31:35,777 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/02621a6bc36c46199999dcd187f46dd9 2024-12-13T21:31:35,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/10e63c1f0e954b7cb42ad36374736d85 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/10e63c1f0e954b7cb42ad36374736d85 2024-12-13T21:31:35,791 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/10e63c1f0e954b7cb42ad36374736d85, entries=150, sequenceid=171, filesize=11.9 K 2024-12-13T21:31:35,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/b7b89913c120470ea4a87db6017c23e8 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/b7b89913c120470ea4a87db6017c23e8 2024-12-13T21:31:35,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/b7b89913c120470ea4a87db6017c23e8, entries=150, sequenceid=171, filesize=11.9 K 2024-12-13T21:31:35,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/02621a6bc36c46199999dcd187f46dd9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/02621a6bc36c46199999dcd187f46dd9 2024-12-13T21:31:35,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/02621a6bc36c46199999dcd187f46dd9, entries=150, sequenceid=171, filesize=11.9 K 2024-12-13T21:31:35,802 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 464ms, sequenceid=171, compaction requested=true 2024-12-13T21:31:35,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:35,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:31:35,802 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:35,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:35,802 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:35,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:35,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:35,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:35,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:35,808 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:35,809 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/A is initiating minor compaction (all files) 2024-12-13T21:31:35,809 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/A in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:35,809 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/366b0965ea7741f782788ed9c788ece5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/038c7d3b81964c88b53a7d73553fff9d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/604ca1af6efd431cba4dd078efa0b0b9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/10e63c1f0e954b7cb42ad36374736d85] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=47.4 K 2024-12-13T21:31:35,809 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:35,809 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 366b0965ea7741f782788ed9c788ece5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734125491845 2024-12-13T21:31:35,809 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/B is initiating minor compaction (all files) 2024-12-13T21:31:35,809 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/B in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
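The RegionTooBusyException warnings above all come from HRegion.checkResources() rejecting mutations while the three-family flush of region 3c1b6e03dacebdc2f9aa13c07eb1be8e is still writing its .tmp files: the region's memstore has crossed the blocking threshold reported as "Over memstore limit=512.0 K". In HBase that threshold is the per-region flush size multiplied by hbase.hregion.memstore.block.multiplier. The Java sketch below only illustrates that relationship; the 128 K flush size is an assumption picked so that 128 K * 4 matches the 512 K limit seen here, not a value read from the test configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch of how the "Over memstore limit" threshold in the log is derived.
// The 128 K flush size below is an assumption (chosen so 128 K * 4 = 512 K matches the
// limit reported above); the real test may configure these properties differently.
public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // per-region flush trigger
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

    // Writes are rejected with RegionTooBusyException once the region's memstore
    // exceeds flushSize * multiplier, until an in-flight flush frees space again.
    long blockingLimit = flushSize * multiplier;
    System.out.println("Blocking memstore limit = " + (blockingLimit / 1024) + " K");
  }
}

RegionTooBusyException is retriable on the client side, so the advancing callIds on the same connections (85, 87, 89, ... on 172.17.0.3:42358) are consistent with the client re-submitting the rejected mutations until the flush that finishes at 21:31:35,802 frees memstore space.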
2024-12-13T21:31:35,809 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/c93b31c2c1214dbe96ee6e03c0b5974b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/0bf7b33121334c8ebeb4d886de04af39, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/9ce9c513510849939e797d0370b56cbf, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/b7b89913c120470ea4a87db6017c23e8] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=47.4 K 2024-12-13T21:31:35,809 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 038c7d3b81964c88b53a7d73553fff9d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1734125491950 2024-12-13T21:31:35,810 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 604ca1af6efd431cba4dd078efa0b0b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734125492580 2024-12-13T21:31:35,810 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting c93b31c2c1214dbe96ee6e03c0b5974b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734125491845 2024-12-13T21:31:35,810 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10e63c1f0e954b7cb42ad36374736d85, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734125495337 2024-12-13T21:31:35,810 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bf7b33121334c8ebeb4d886de04af39, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1734125491950 2024-12-13T21:31:35,811 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ce9c513510849939e797d0370b56cbf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734125492580 2024-12-13T21:31:35,812 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting b7b89913c120470ea4a87db6017c23e8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734125495337 2024-12-13T21:31:35,824 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#B#compaction#245 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:35,825 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/fc84750107974314a2fa4a114e24eac3 is 50, key is test_row_0/B:col10/1734125495337/Put/seqid=0 2024-12-13T21:31:35,828 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#A#compaction#246 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:35,829 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/2d2ed0d4d09547a2b6eb4aa7cab12ad7 is 50, key is test_row_0/A:col10/1734125495337/Put/seqid=0 2024-12-13T21:31:35,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742120_1296 (size=12527) 2024-12-13T21:31:35,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742121_1297 (size=12527) 2024-12-13T21:31:35,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:35,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-13T21:31:35,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:35,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:35,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:35,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:35,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:35,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:35,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/84e4255ce2c64492b2a2ab4358d17d89 is 50, key is test_row_0/A:col10/1734125495972/Put/seqid=0 2024-12-13T21:31:35,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125555981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125555992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:35,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125555992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:35,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125555992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742122_1298 (size=12151) 2024-12-13T21:31:36,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/84e4255ce2c64492b2a2ab4358d17d89 2024-12-13T21:31:36,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/273538143b2e4085b1a9bf43b3dbcfce is 50, key is test_row_0/B:col10/1734125495972/Put/seqid=0 2024-12-13T21:31:36,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742123_1299 (size=12151) 2024-12-13T21:31:36,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125556093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125556096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125556097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125556097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-13T21:31:36,218 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-13T21:31:36,219 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:36,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-13T21:31:36,221 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:36,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-13T21:31:36,222 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:36,222 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:36,253 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/fc84750107974314a2fa4a114e24eac3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/fc84750107974314a2fa4a114e24eac3 2024-12-13T21:31:36,259 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/B of 3c1b6e03dacebdc2f9aa13c07eb1be8e into fc84750107974314a2fa4a114e24eac3(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
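The "Client=jenkins//172.17.0.3 flush TestAcidGuarantees" entry and the FlushTableProcedure records around it (pid=77 just completed, pid=79 stored with subprocedure pid=80) are client-driven table flushes, and the "Completed compaction of 4 (all) file(s) ... into fc84750107974314a2fa4a114e24eac3(size=12.2 K)" lines show the region server folding the four ~12 K flush outputs per store back into a single file. Below is a minimal, hedged sketch of issuing that same admin-level flush (plus an explicit compaction request) from client code; the table name comes from the log, while everything else (a cluster config such as hbase-site.xml on the classpath, the standalone main) is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch only: issues the same kind of admin flush the master logs as
// "Client=... flush TestAcidGuarantees". Assumes cluster configuration is
// available on the classpath; the test itself drives this through HBaseAdmin.
public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);    // on this build the master runs this as a FlushTableProcedure (cf. pid=79 above)
      admin.compact(table);  // asks the region servers to schedule a (minor) compaction of the table's stores
    }
  }
}

Even without the explicit compact() call, the log shows the region server queuing the compactions itself ("Small Compaction requested: system; Because: MemStoreFlusher.0") once each store accumulates enough flush files.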
2024-12-13T21:31:36,259 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:36,259 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/B, priority=12, startTime=1734125495802; duration=0sec 2024-12-13T21:31:36,259 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:36,259 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:B 2024-12-13T21:31:36,259 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:36,261 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:36,261 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/C is initiating minor compaction (all files) 2024-12-13T21:31:36,261 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/C in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:36,261 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/ce17dcd2bc8940348618adb9084e98a5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/537206e16ddc4463b7c41b29146f4677, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/6ec4a926873544cca888c98e4041c919, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/02621a6bc36c46199999dcd187f46dd9] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=47.4 K 2024-12-13T21:31:36,262 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting ce17dcd2bc8940348618adb9084e98a5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734125491845 2024-12-13T21:31:36,262 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/2d2ed0d4d09547a2b6eb4aa7cab12ad7 as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/2d2ed0d4d09547a2b6eb4aa7cab12ad7 2024-12-13T21:31:36,262 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 537206e16ddc4463b7c41b29146f4677, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1734125491950 2024-12-13T21:31:36,263 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ec4a926873544cca888c98e4041c919, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734125492580 2024-12-13T21:31:36,264 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 02621a6bc36c46199999dcd187f46dd9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734125495337 2024-12-13T21:31:36,267 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/A of 3c1b6e03dacebdc2f9aa13c07eb1be8e into 2d2ed0d4d09547a2b6eb4aa7cab12ad7(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:36,267 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:36,267 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/A, priority=12, startTime=1734125495802; duration=0sec 2024-12-13T21:31:36,267 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:36,267 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:A 2024-12-13T21:31:36,272 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#C#compaction#249 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:36,273 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/7308b73c4c884dcbbcf22e5a93d39600 is 50, key is test_row_0/C:col10/1734125495337/Put/seqid=0 2024-12-13T21:31:36,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742124_1300 (size=12527) 2024-12-13T21:31:36,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125556296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125556299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125556300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125556300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-13T21:31:36,374 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-13T21:31:36,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:36,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:36,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:36,374 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
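The "Unable to complete flush ... as already flushing" failure above is reported back to the master and retried, while writers keep hitting RegionTooBusyException because the region memstore sits over its blocking limit. That limit is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the sketch below shows one assumed combination that works out to the 512 K figure seen in these entries (the test's actual settings are not visible in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfigSketch {
      // Returns a configuration whose blocking threshold (flush size x multiplier) is 512 K,
      // matching the "Over memstore limit=512.0 K" rejections above. Values are illustrative assumptions.
      static Configuration smallMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // assumed 128 K flush trigger
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // 128 K * 4 = 512 K blocking limit
        return conf;
      }
    }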
2024-12-13T21:31:36,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:36,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:36,457 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/273538143b2e4085b1a9bf43b3dbcfce 2024-12-13T21:31:36,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/2cfe4d28e96744c680347871e590e892 is 50, key is test_row_0/C:col10/1734125495972/Put/seqid=0 2024-12-13T21:31:36,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742125_1301 (size=12151) 2024-12-13T21:31:36,490 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/2cfe4d28e96744c680347871e590e892 2024-12-13T21:31:36,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/84e4255ce2c64492b2a2ab4358d17d89 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/84e4255ce2c64492b2a2ab4358d17d89 2024-12-13T21:31:36,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/84e4255ce2c64492b2a2ab4358d17d89, entries=150, sequenceid=196, filesize=11.9 K 2024-12-13T21:31:36,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/273538143b2e4085b1a9bf43b3dbcfce as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/273538143b2e4085b1a9bf43b3dbcfce 2024-12-13T21:31:36,503 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/273538143b2e4085b1a9bf43b3dbcfce, entries=150, sequenceid=196, filesize=11.9 K 2024-12-13T21:31:36,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/2cfe4d28e96744c680347871e590e892 as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/2cfe4d28e96744c680347871e590e892 2024-12-13T21:31:36,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/2cfe4d28e96744c680347871e590e892, entries=150, sequenceid=196, filesize=11.9 K 2024-12-13T21:31:36,509 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 536ms, sequenceid=196, compaction requested=false 2024-12-13T21:31:36,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:36,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-13T21:31:36,528 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-13T21:31:36,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:36,529 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:31:36,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:36,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:36,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:36,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:36,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:36,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:36,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/d6270e4e63ac4f9eb133611c1724e953 is 50, key is test_row_0/A:col10/1734125495979/Put/seqid=0 2024-12-13T21:31:36,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742126_1302 (size=12151) 2024-12-13T21:31:36,584 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/d6270e4e63ac4f9eb133611c1724e953 2024-12-13T21:31:36,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/3777d48b623548aa9540a03cdf62026a is 50, key is test_row_0/B:col10/1734125495979/Put/seqid=0 2024-12-13T21:31:36,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:36,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:36,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742127_1303 (size=12151) 2024-12-13T21:31:36,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125556627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125556628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125556635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125556636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,690 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/7308b73c4c884dcbbcf22e5a93d39600 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/7308b73c4c884dcbbcf22e5a93d39600 2024-12-13T21:31:36,696 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/C of 3c1b6e03dacebdc2f9aa13c07eb1be8e into 7308b73c4c884dcbbcf22e5a93d39600(size=12.2 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:36,696 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:36,696 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/C, priority=12, startTime=1734125495803; duration=0sec 2024-12-13T21:31:36,697 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:36,697 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:C 2024-12-13T21:31:36,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125556730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,733 DEBUG [Thread-1198 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:31:36,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125556737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125556738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125556739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125556744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-13T21:31:36,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125556940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125556941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125556945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:36,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:36,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125556946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,036 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/3777d48b623548aa9540a03cdf62026a 2024-12-13T21:31:37,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/5b13cedfb18348208b3d858c32bbbc07 is 50, key is test_row_0/C:col10/1734125495979/Put/seqid=0 2024-12-13T21:31:37,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742128_1304 (size=12151) 2024-12-13T21:31:37,078 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/5b13cedfb18348208b3d858c32bbbc07 2024-12-13T21:31:37,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/d6270e4e63ac4f9eb133611c1724e953 as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/d6270e4e63ac4f9eb133611c1724e953 2024-12-13T21:31:37,102 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/d6270e4e63ac4f9eb133611c1724e953, entries=150, sequenceid=210, filesize=11.9 K 2024-12-13T21:31:37,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/3777d48b623548aa9540a03cdf62026a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/3777d48b623548aa9540a03cdf62026a 2024-12-13T21:31:37,109 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/3777d48b623548aa9540a03cdf62026a, entries=150, sequenceid=210, filesize=11.9 K 2024-12-13T21:31:37,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/5b13cedfb18348208b3d858c32bbbc07 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/5b13cedfb18348208b3d858c32bbbc07 2024-12-13T21:31:37,118 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/5b13cedfb18348208b3d858c32bbbc07, entries=150, sequenceid=210, filesize=11.9 K 2024-12-13T21:31:37,119 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 590ms, sequenceid=210, compaction requested=true 2024-12-13T21:31:37,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:37,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:37,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-13T21:31:37,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-13T21:31:37,122 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-13T21:31:37,122 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 899 msec 2024-12-13T21:31:37,124 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 904 msec 2024-12-13T21:31:37,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:37,246 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-13T21:31:37,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:37,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:37,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:37,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:37,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:37,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:37,251 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/5edb26488621406d9ad37eb5b0f10b7d is 50, key is test_row_0/A:col10/1734125496628/Put/seqid=0 2024-12-13T21:31:37,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742129_1305 (size=14541) 2024-12-13T21:31:37,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125557280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125557281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125557281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125557283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-13T21:31:37,324 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-13T21:31:37,325 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:37,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-13T21:31:37,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T21:31:37,327 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:37,327 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:37,327 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:37,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125557388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125557388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125557388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125557388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T21:31:37,479 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-13T21:31:37,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:37,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:37,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:37,480 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:37,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:37,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:37,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125557590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125557591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125557591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125557591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T21:31:37,632 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-13T21:31:37,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:37,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:37,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:37,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:37,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:37,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:37,661 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/5edb26488621406d9ad37eb5b0f10b7d 2024-12-13T21:31:37,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/b7d61b544f164985970c6ec83bdc9180 is 50, key is test_row_0/B:col10/1734125496628/Put/seqid=0 2024-12-13T21:31:37,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742130_1306 (size=12151) 2024-12-13T21:31:37,785 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-13T21:31:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:37,786 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:37,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125557893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125557896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125557896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:37,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125557898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T21:31:37,938 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:37,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-13T21:31:37,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:37,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:37,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:37,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:37,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:37,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:38,091 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-13T21:31:38,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:38,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:38,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:38,092 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:38,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:38,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:38,121 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/b7d61b544f164985970c6ec83bdc9180 2024-12-13T21:31:38,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/bb08f3ce2602485194a76df949490fe9 is 50, key is test_row_0/C:col10/1734125496628/Put/seqid=0 2024-12-13T21:31:38,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742131_1307 (size=12151) 2024-12-13T21:31:38,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/bb08f3ce2602485194a76df949490fe9 2024-12-13T21:31:38,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/5edb26488621406d9ad37eb5b0f10b7d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/5edb26488621406d9ad37eb5b0f10b7d 2024-12-13T21:31:38,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/5edb26488621406d9ad37eb5b0f10b7d, entries=200, sequenceid=237, filesize=14.2 K 2024-12-13T21:31:38,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/b7d61b544f164985970c6ec83bdc9180 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/b7d61b544f164985970c6ec83bdc9180 2024-12-13T21:31:38,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/b7d61b544f164985970c6ec83bdc9180, entries=150, sequenceid=237, filesize=11.9 K 2024-12-13T21:31:38,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/bb08f3ce2602485194a76df949490fe9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bb08f3ce2602485194a76df949490fe9 2024-12-13T21:31:38,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bb08f3ce2602485194a76df949490fe9, entries=150, sequenceid=237, filesize=11.9 K 2024-12-13T21:31:38,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 975ms, sequenceid=237, compaction requested=true 2024-12-13T21:31:38,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:38,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:31:38,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:38,221 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:38,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:38,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:38,222 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:38,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:38,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:38,223 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51370 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:38,223 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/A is initiating minor compaction (all files) 2024-12-13T21:31:38,223 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/A in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:38,223 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/2d2ed0d4d09547a2b6eb4aa7cab12ad7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/84e4255ce2c64492b2a2ab4358d17d89, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/d6270e4e63ac4f9eb133611c1724e953, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/5edb26488621406d9ad37eb5b0f10b7d] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=50.2 K 2024-12-13T21:31:38,224 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:38,224 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/B is initiating minor compaction (all files) 2024-12-13T21:31:38,224 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/B in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:38,224 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/fc84750107974314a2fa4a114e24eac3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/273538143b2e4085b1a9bf43b3dbcfce, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/3777d48b623548aa9540a03cdf62026a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/b7d61b544f164985970c6ec83bdc9180] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=47.8 K 2024-12-13T21:31:38,224 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d2ed0d4d09547a2b6eb4aa7cab12ad7, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734125495337 2024-12-13T21:31:38,224 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting fc84750107974314a2fa4a114e24eac3, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734125495337 2024-12-13T21:31:38,225 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84e4255ce2c64492b2a2ab4358d17d89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, 
earliestPutTs=1734125495352 2024-12-13T21:31:38,225 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 273538143b2e4085b1a9bf43b3dbcfce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1734125495352 2024-12-13T21:31:38,225 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6270e4e63ac4f9eb133611c1724e953, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734125495979 2024-12-13T21:31:38,225 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3777d48b623548aa9540a03cdf62026a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734125495979 2024-12-13T21:31:38,225 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5edb26488621406d9ad37eb5b0f10b7d, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1734125496625 2024-12-13T21:31:38,225 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting b7d61b544f164985970c6ec83bdc9180, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1734125496628 2024-12-13T21:31:38,245 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#B#compaction#257 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:38,246 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/65d178b22a8349dbb6c8bb0148958e0d is 50, key is test_row_0/B:col10/1734125496628/Put/seqid=0 2024-12-13T21:31:38,253 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,253 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-13T21:31:38,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:38,254 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:31:38,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:38,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:38,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:38,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:38,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:38,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:38,261 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#A#compaction#258 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:38,261 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/8cbf36b4885240a4a27f09e8178ac289 is 50, key is test_row_0/A:col10/1734125496628/Put/seqid=0 2024-12-13T21:31:38,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/8970dbdfd91545869bf5e4780aac1393 is 50, key is test_row_0/A:col10/1734125497253/Put/seqid=0 2024-12-13T21:31:38,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742132_1308 (size=12663) 2024-12-13T21:31:38,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742133_1309 (size=12663) 2024-12-13T21:31:38,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742134_1310 (size=12151) 2024-12-13T21:31:38,324 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/8cbf36b4885240a4a27f09e8178ac289 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8cbf36b4885240a4a27f09e8178ac289 2024-12-13T21:31:38,330 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/A of 3c1b6e03dacebdc2f9aa13c07eb1be8e into 8cbf36b4885240a4a27f09e8178ac289(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:38,330 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:38,330 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/A, priority=12, startTime=1734125498221; duration=0sec 2024-12-13T21:31:38,330 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:38,330 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:A 2024-12-13T21:31:38,330 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:31:38,331 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:31:38,331 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/C is initiating minor compaction (all files) 2024-12-13T21:31:38,332 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/C in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:38,332 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/7308b73c4c884dcbbcf22e5a93d39600, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/2cfe4d28e96744c680347871e590e892, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/5b13cedfb18348208b3d858c32bbbc07, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bb08f3ce2602485194a76df949490fe9] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=47.8 K 2024-12-13T21:31:38,332 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7308b73c4c884dcbbcf22e5a93d39600, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734125495337 2024-12-13T21:31:38,332 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2cfe4d28e96744c680347871e590e892, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1734125495352 2024-12-13T21:31:38,333 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b13cedfb18348208b3d858c32bbbc07, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734125495979 2024-12-13T21:31:38,333 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb08f3ce2602485194a76df949490fe9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1734125496628 2024-12-13T21:31:38,344 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#C#compaction#260 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:38,345 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/73bd314753cf41758b8d24a3192cace4 is 50, key is test_row_0/C:col10/1734125496628/Put/seqid=0 2024-12-13T21:31:38,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742135_1311 (size=12663) 2024-12-13T21:31:38,378 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/73bd314753cf41758b8d24a3192cace4 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/73bd314753cf41758b8d24a3192cace4 2024-12-13T21:31:38,384 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/C of 3c1b6e03dacebdc2f9aa13c07eb1be8e into 73bd314753cf41758b8d24a3192cace4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:38,384 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:38,384 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/C, priority=12, startTime=1734125498222; duration=0sec 2024-12-13T21:31:38,384 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:38,384 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:C 2024-12-13T21:31:38,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:38,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:38,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:38,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125558417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:38,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125558418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:38,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125558419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:38,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125558419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T21:31:38,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:38,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:38,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125558521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125558521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:38,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:38,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125558522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125558521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,713 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/65d178b22a8349dbb6c8bb0148958e0d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/65d178b22a8349dbb6c8bb0148958e0d 2024-12-13T21:31:38,717 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/B of 3c1b6e03dacebdc2f9aa13c07eb1be8e into 65d178b22a8349dbb6c8bb0148958e0d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:38,717 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:38,717 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/B, priority=12, startTime=1734125498221; duration=0sec 2024-12-13T21:31:38,717 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:38,717 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:B 2024-12-13T21:31:38,719 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/8970dbdfd91545869bf5e4780aac1393 2024-12-13T21:31:38,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:38,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:38,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125558724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125558724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:38,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125558724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/764e2638dd7142eda739625b369c2394 is 50, key is test_row_0/B:col10/1734125497253/Put/seqid=0 2024-12-13T21:31:38,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:38,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125558727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:38,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742136_1312 (size=12151) 2024-12-13T21:31:39,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125559026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125559026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125559028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125559030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,132 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/764e2638dd7142eda739625b369c2394 2024-12-13T21:31:39,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/3cde268561624024b56954b8bccba7b5 is 50, key is test_row_0/C:col10/1734125497253/Put/seqid=0 2024-12-13T21:31:39,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742137_1313 (size=12151) 2024-12-13T21:31:39,161 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/3cde268561624024b56954b8bccba7b5 2024-12-13T21:31:39,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/8970dbdfd91545869bf5e4780aac1393 as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8970dbdfd91545869bf5e4780aac1393 2024-12-13T21:31:39,168 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8970dbdfd91545869bf5e4780aac1393, entries=150, sequenceid=249, filesize=11.9 K 2024-12-13T21:31:39,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/764e2638dd7142eda739625b369c2394 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/764e2638dd7142eda739625b369c2394 2024-12-13T21:31:39,172 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/764e2638dd7142eda739625b369c2394, entries=150, sequenceid=249, filesize=11.9 K 2024-12-13T21:31:39,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/3cde268561624024b56954b8bccba7b5 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3cde268561624024b56954b8bccba7b5 2024-12-13T21:31:39,176 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3cde268561624024b56954b8bccba7b5, entries=150, sequenceid=249, filesize=11.9 K 2024-12-13T21:31:39,177 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 923ms, sequenceid=249, compaction requested=false 2024-12-13T21:31:39,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:39,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
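For context on the 512.0 K figure: in stock HBase the blocking memstore size enforced by checkResources is the per-region flush threshold multiplied by a blocking factor, so the rejections above mean the memstore grew past flush-size times multiplier before the flush could drain it (this test clearly runs with a far smaller threshold than the 128 MB production default). The snippet below only illustrates the two configuration properties involved; the values shown are the stock defaults, not the ones this test uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Memstore size at which a region flush is requested (stock default: 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore exceeds
    // flush.size * block.multiplier (stock default multiplier: 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("blocking limit = " + blockingLimit + " bytes");
  }
}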
2024-12-13T21:31:39,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-13T21:31:39,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-13T21:31:39,180 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-13T21:31:39,180 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8510 sec 2024-12-13T21:31:39,181 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.8550 sec 2024-12-13T21:31:39,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T21:31:39,430 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-13T21:31:39,432 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:39,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-13T21:31:39,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-13T21:31:39,434 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:39,435 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:39,435 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:39,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:39,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-13T21:31:39,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-13T21:31:39,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:39,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:39,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, 
store=B 2024-12-13T21:31:39,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:39,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:39,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:39,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/edf034d2c7ca4b36965541d4a80dbf05 is 50, key is test_row_0/A:col10/1734125499529/Put/seqid=0 2024-12-13T21:31:39,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125559542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125559542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125559545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125559546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742138_1314 (size=12301) 2024-12-13T21:31:39,564 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/edf034d2c7ca4b36965541d4a80dbf05 2024-12-13T21:31:39,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/2338e63cc52c43a880ea7258da5f2ac8 is 50, key is test_row_0/B:col10/1734125499529/Put/seqid=0 2024-12-13T21:31:39,586 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-13T21:31:39,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:39,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:39,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
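The entries above show the flush requested as procId 81 completing and a new client flush request ("Client=jenkins ... flush TestAcidGuarantees", stored as pid=83 with region subprocedure pid=84) arriving while MemStoreFlusher.0 is already flushing the region, so FlushRegionCallable declines with "NOT flushing ... as already flushing". A table flush like this is typically requested through the admin API; the sketch below is illustrative only, using the standard HBase Java client rather than the test's exact code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master (pid=83 in the log above) and
      // waits for it to finish; when a region is already mid-flush, the master
      // re-dispatches the region subprocedure, as the repeated pid=84 attempts show.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}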
2024-12-13T21:31:39,587 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:39,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:39,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:39,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742139_1315 (size=12301) 2024-12-13T21:31:39,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125559647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125559647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125559649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125559649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-13T21:31:39,740 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-13T21:31:39,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:39,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:39,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:39,740 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:39,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:39,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:39,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125559851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125559852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125559853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:39,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125559853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,892 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:39,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-13T21:31:39,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:39,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:39,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:39,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:39,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:39,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:40,021 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/2338e63cc52c43a880ea7258da5f2ac8 2024-12-13T21:31:40,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/31e2c38a6154484989beeac6d916a713 is 50, key is test_row_0/C:col10/1734125499529/Put/seqid=0 2024-12-13T21:31:40,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742140_1316 (size=12301) 2024-12-13T21:31:40,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-13T21:31:40,044 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-13T21:31:40,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:40,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:40,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:40,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:40,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:40,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:40,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:40,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125560155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:40,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125560156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:40,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125560157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:40,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125560158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,196 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-13T21:31:40,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:40,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:40,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:40,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:40,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:40,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:40,350 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-13T21:31:40,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:40,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:40,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:40,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:40,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:40,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:31:40,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/31e2c38a6154484989beeac6d916a713 2024-12-13T21:31:40,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/edf034d2c7ca4b36965541d4a80dbf05 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/edf034d2c7ca4b36965541d4a80dbf05 2024-12-13T21:31:40,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/edf034d2c7ca4b36965541d4a80dbf05, entries=150, sequenceid=278, filesize=12.0 K 2024-12-13T21:31:40,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/2338e63cc52c43a880ea7258da5f2ac8 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/2338e63cc52c43a880ea7258da5f2ac8 2024-12-13T21:31:40,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/2338e63cc52c43a880ea7258da5f2ac8, entries=150, sequenceid=278, filesize=12.0 K 2024-12-13T21:31:40,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/31e2c38a6154484989beeac6d916a713 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/31e2c38a6154484989beeac6d916a713 2024-12-13T21:31:40,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/31e2c38a6154484989beeac6d916a713, entries=150, sequenceid=278, filesize=12.0 K 2024-12-13T21:31:40,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 931ms, sequenceid=278, compaction requested=true 2024-12-13T21:31:40,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:40,462 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:40,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:A, priority=-2147483648, current 
under compaction store size is 1 2024-12-13T21:31:40,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:40,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:40,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:40,462 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:40,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:40,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:40,463 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:40,463 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/A is initiating minor compaction (all files) 2024-12-13T21:31:40,463 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/A in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:40,463 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8cbf36b4885240a4a27f09e8178ac289, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8970dbdfd91545869bf5e4780aac1393, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/edf034d2c7ca4b36965541d4a80dbf05] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=36.2 K 2024-12-13T21:31:40,464 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:40,464 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/B is initiating minor compaction (all files) 2024-12-13T21:31:40,464 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/B in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:40,464 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/65d178b22a8349dbb6c8bb0148958e0d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/764e2638dd7142eda739625b369c2394, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/2338e63cc52c43a880ea7258da5f2ac8] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=36.2 K 2024-12-13T21:31:40,464 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cbf36b4885240a4a27f09e8178ac289, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1734125496628 2024-12-13T21:31:40,464 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 65d178b22a8349dbb6c8bb0148958e0d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1734125496628 2024-12-13T21:31:40,465 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 8970dbdfd91545869bf5e4780aac1393, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1734125497253 2024-12-13T21:31:40,465 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 764e2638dd7142eda739625b369c2394, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1734125497253 2024-12-13T21:31:40,465 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting edf034d2c7ca4b36965541d4a80dbf05, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734125498417 2024-12-13T21:31:40,466 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2338e63cc52c43a880ea7258da5f2ac8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734125498417 2024-12-13T21:31:40,475 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#A#compaction#266 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:40,476 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/abe9efdb1bb7487db0d20ca0e40dae5d is 50, key is test_row_0/A:col10/1734125499529/Put/seqid=0 2024-12-13T21:31:40,477 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#B#compaction#267 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:40,477 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/bf340b43c5df4d1aaf2ee194d4c3a221 is 50, key is test_row_0/B:col10/1734125499529/Put/seqid=0 2024-12-13T21:31:40,503 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-13T21:31:40,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:40,504 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-13T21:31:40,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:40,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:40,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:40,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:40,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:40,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:40,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742141_1317 (size=12915) 2024-12-13T21:31:40,525 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/bf340b43c5df4d1aaf2ee194d4c3a221 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/bf340b43c5df4d1aaf2ee194d4c3a221 2024-12-13T21:31:40,531 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/B of 3c1b6e03dacebdc2f9aa13c07eb1be8e into bf340b43c5df4d1aaf2ee194d4c3a221(size=12.6 K), 
total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:40,531 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:40,531 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/B, priority=13, startTime=1734125500462; duration=0sec 2024-12-13T21:31:40,531 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:40,531 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:B 2024-12-13T21:31:40,531 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:40,532 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:40,532 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/C is initiating minor compaction (all files) 2024-12-13T21:31:40,532 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/C in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:40,532 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/73bd314753cf41758b8d24a3192cace4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3cde268561624024b56954b8bccba7b5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/31e2c38a6154484989beeac6d916a713] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=36.2 K 2024-12-13T21:31:40,533 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73bd314753cf41758b8d24a3192cace4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1734125496628 2024-12-13T21:31:40,533 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3cde268561624024b56954b8bccba7b5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1734125497253 2024-12-13T21:31:40,534 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31e2c38a6154484989beeac6d916a713, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734125498417 2024-12-13T21:31:40,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/2fcafe47c0fd4d9caab2658eb9b4dd1f is 50, key is test_row_0/A:col10/1734125499544/Put/seqid=0 2024-12-13T21:31:40,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-13T21:31:40,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742142_1318 (size=12915) 2024-12-13T21:31:40,583 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#C#compaction#269 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:40,584 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/3eac5c23afc644d9b27a184a6647ae1f is 50, key is test_row_0/C:col10/1734125499529/Put/seqid=0 2024-12-13T21:31:40,591 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/abe9efdb1bb7487db0d20ca0e40dae5d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/abe9efdb1bb7487db0d20ca0e40dae5d 2024-12-13T21:31:40,597 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/A of 3c1b6e03dacebdc2f9aa13c07eb1be8e into abe9efdb1bb7487db0d20ca0e40dae5d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:40,598 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:40,598 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/A, priority=13, startTime=1734125500462; duration=0sec 2024-12-13T21:31:40,598 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:40,598 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:A 2024-12-13T21:31:40,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742143_1319 (size=12301) 2024-12-13T21:31:40,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742144_1320 (size=12915) 2024-12-13T21:31:40,663 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/3eac5c23afc644d9b27a184a6647ae1f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3eac5c23afc644d9b27a184a6647ae1f 2024-12-13T21:31:40,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:40,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
as already flushing 2024-12-13T21:31:40,669 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/C of 3c1b6e03dacebdc2f9aa13c07eb1be8e into 3eac5c23afc644d9b27a184a6647ae1f(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:40,669 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:40,669 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/C, priority=13, startTime=1734125500462; duration=0sec 2024-12-13T21:31:40,669 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:40,669 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:C 2024-12-13T21:31:40,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:40,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125560696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125560700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:40,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125560704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:40,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125560705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:40,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42322 deadline: 1734125560761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,762 DEBUG [Thread-1198 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:31:40,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:40,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125560806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:40,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:40,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125560809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125560809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:40,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:40,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125560816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,009 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/2fcafe47c0fd4d9caab2658eb9b4dd1f 2024-12-13T21:31:41,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125561010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125561012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125561013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125561018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/38e75dd213fd496db811cc3fc99ed784 is 50, key is test_row_0/B:col10/1734125499544/Put/seqid=0 2024-12-13T21:31:41,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742145_1321 (size=12301) 2024-12-13T21:31:41,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125561315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125561315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125561317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125561321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,455 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/38e75dd213fd496db811cc3fc99ed784 2024-12-13T21:31:41,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/3ae7a5d2a06d4f73a7918efb6802ced3 is 50, key is test_row_0/C:col10/1734125499544/Put/seqid=0 2024-12-13T21:31:41,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742146_1322 (size=12301) 2024-12-13T21:31:41,487 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/3ae7a5d2a06d4f73a7918efb6802ced3 2024-12-13T21:31:41,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/2fcafe47c0fd4d9caab2658eb9b4dd1f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/2fcafe47c0fd4d9caab2658eb9b4dd1f 2024-12-13T21:31:41,495 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/2fcafe47c0fd4d9caab2658eb9b4dd1f, entries=150, sequenceid=289, filesize=12.0 K 2024-12-13T21:31:41,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/38e75dd213fd496db811cc3fc99ed784 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/38e75dd213fd496db811cc3fc99ed784 2024-12-13T21:31:41,500 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/38e75dd213fd496db811cc3fc99ed784, entries=150, sequenceid=289, filesize=12.0 K 2024-12-13T21:31:41,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/3ae7a5d2a06d4f73a7918efb6802ced3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3ae7a5d2a06d4f73a7918efb6802ced3 2024-12-13T21:31:41,504 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3ae7a5d2a06d4f73a7918efb6802ced3, entries=150, sequenceid=289, filesize=12.0 K 2024-12-13T21:31:41,505 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 1001ms, sequenceid=289, compaction requested=false 2024-12-13T21:31:41,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:41,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:41,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-13T21:31:41,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-13T21:31:41,518 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-13T21:31:41,518 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0710 sec 2024-12-13T21:31:41,519 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 2.0870 sec 2024-12-13T21:31:41,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-13T21:31:41,537 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-13T21:31:41,538 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:41,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-13T21:31:41,539 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:41,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-13T21:31:41,542 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:41,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:41,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-13T21:31:41,693 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-13T21:31:41,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:41,694 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-13T21:31:41,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:41,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:41,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:41,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:41,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:41,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:41,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/d4b310061d2b429a9e5f5a4ed0496c2a is 50, key is test_row_0/A:col10/1734125500693/Put/seqid=0 2024-12-13T21:31:41,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742147_1323 (size=12301) 2024-12-13T21:31:41,705 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/d4b310061d2b429a9e5f5a4ed0496c2a 2024-12-13T21:31:41,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/8b2cbab25f11477d8fe1e47f0c6ddadb is 50, key is test_row_0/B:col10/1734125500693/Put/seqid=0 2024-12-13T21:31:41,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742148_1324 (size=12301) 2024-12-13T21:31:41,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:41,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
as already flushing 2024-12-13T21:31:41,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125561823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125561823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125561824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125561826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-13T21:31:41,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125561926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125561926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:41,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:41,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125561926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:42,127 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/8b2cbab25f11477d8fe1e47f0c6ddadb 2024-12-13T21:31:42,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:42,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125562128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:42,130 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:42,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125562129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:42,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:42,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125562129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:42,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/07922e353caa4e4593199a32d2e526a5 is 50, key is test_row_0/C:col10/1734125500693/Put/seqid=0 2024-12-13T21:31:42,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742149_1325 (size=12301) 2024-12-13T21:31:42,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-13T21:31:42,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:42,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125562433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:42,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:42,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125562433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:42,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:42,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125562433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:42,535 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/07922e353caa4e4593199a32d2e526a5 2024-12-13T21:31:42,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/d4b310061d2b429a9e5f5a4ed0496c2a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/d4b310061d2b429a9e5f5a4ed0496c2a 2024-12-13T21:31:42,542 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/d4b310061d2b429a9e5f5a4ed0496c2a, entries=150, sequenceid=319, filesize=12.0 K 2024-12-13T21:31:42,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/8b2cbab25f11477d8fe1e47f0c6ddadb as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/8b2cbab25f11477d8fe1e47f0c6ddadb 2024-12-13T21:31:42,546 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/8b2cbab25f11477d8fe1e47f0c6ddadb, entries=150, sequenceid=319, filesize=12.0 K 2024-12-13T21:31:42,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/07922e353caa4e4593199a32d2e526a5 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/07922e353caa4e4593199a32d2e526a5 2024-12-13T21:31:42,551 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/07922e353caa4e4593199a32d2e526a5, entries=150, sequenceid=319, filesize=12.0 K 2024-12-13T21:31:42,553 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 859ms, sequenceid=319, compaction requested=true 2024-12-13T21:31:42,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:42,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
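The repeated WARN/DEBUG entries above show RPC handlers rejecting Mutate calls with RegionTooBusyException while the region's memstore is over its 512.0 K blocking limit and a flush is in progress; the callers simply come back with new callIds until the flush drains the memstore. As an illustration only (the test's own client code is not part of this log), a minimal application-level retry around a single put might look like the sketch below. The table, row, and column names are copied from the log lines; the retry cap and backoff are arbitrary, and in practice the HBase client also retries such calls internally before surfacing an error.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);        // may fail while the region is over its memstore limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;             // arbitrary cap; give up after a few attempts
          }
          Thread.sleep(100L * attempt); // simple linear backoff while the flush drains
        }
      }
    }
  }
}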
2024-12-13T21:31:42,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-13T21:31:42,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-13T21:31:42,555 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-13T21:31:42,555 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0120 sec 2024-12-13T21:31:42,556 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.0180 sec 2024-12-13T21:31:42,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-13T21:31:42,642 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-13T21:31:42,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:42,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-13T21:31:42,645 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:42,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-13T21:31:42,645 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:42,645 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:42,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-13T21:31:42,796 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:42,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-13T21:31:42,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
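The entries above trace a complete flush round trip: the client asks the master to flush TestAcidGuarantees, the master stores FlushTableProcedure pid=87, runs FLUSH_TABLE_PREPARE and FLUSH_TABLE_FLUSH_REGIONS, dispatches a FlushRegionProcedure subprocedure to the region server, and the client polls MasterRpcServices until the procedure is done. A minimal sketch of issuing that kind of flush from client code, assuming a reachable cluster configuration on the classpath, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a table flush; the call returns once the master-side
      // flush procedure (as logged above) reports completion.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}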
2024-12-13T21:31:42,797 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-13T21:31:42,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:42,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:42,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:42,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:42,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:42,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:42,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/f4d336b2b30949be97af9397e68d9d32 is 50, key is test_row_0/A:col10/1734125501823/Put/seqid=0 2024-12-13T21:31:42,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742150_1326 (size=12301) 2024-12-13T21:31:42,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:42,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:42,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125562905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:42,936 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:42,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125562935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:42,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:42,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125562937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:42,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:42,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125562939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:42,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-13T21:31:43,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:43,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125563008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:43,204 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/f4d336b2b30949be97af9397e68d9d32 2024-12-13T21:31:43,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/5e67ddf4887e4fc4bb2a08749732c57d is 50, key is test_row_0/B:col10/1734125501823/Put/seqid=0 2024-12-13T21:31:43,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:43,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125563211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:43,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742151_1327 (size=12301) 2024-12-13T21:31:43,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-13T21:31:43,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:43,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125563517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:43,615 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/5e67ddf4887e4fc4bb2a08749732c57d 2024-12-13T21:31:43,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/528e7ef899904861923738ee62172e29 is 50, key is test_row_0/C:col10/1734125501823/Put/seqid=0 2024-12-13T21:31:43,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742152_1328 (size=12301) 2024-12-13T21:31:43,623 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/528e7ef899904861923738ee62172e29 2024-12-13T21:31:43,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/f4d336b2b30949be97af9397e68d9d32 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/f4d336b2b30949be97af9397e68d9d32 2024-12-13T21:31:43,630 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/f4d336b2b30949be97af9397e68d9d32, entries=150, sequenceid=328, filesize=12.0 K 2024-12-13T21:31:43,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/5e67ddf4887e4fc4bb2a08749732c57d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/5e67ddf4887e4fc4bb2a08749732c57d 2024-12-13T21:31:43,636 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/5e67ddf4887e4fc4bb2a08749732c57d, entries=150, sequenceid=328, filesize=12.0 K 2024-12-13T21:31:43,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/528e7ef899904861923738ee62172e29 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/528e7ef899904861923738ee62172e29 2024-12-13T21:31:43,640 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/528e7ef899904861923738ee62172e29, entries=150, sequenceid=328, filesize=12.0 K 2024-12-13T21:31:43,641 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 844ms, sequenceid=328, compaction requested=true 2024-12-13T21:31:43,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:43,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
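Every rejection in this span reports the same blocking threshold, "Over memstore limit=512.0 K". The exact settings used by this test are not visible in this excerpt, but one way such a small limit can arise is a reduced hbase.hregion.memstore.flush.size combined with the default hbase.hregion.memstore.block.multiplier of 4; the values in the sketch below are assumptions chosen only to reproduce the 512 KiB figure, not the test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values: a 128 KiB flush size times the default block
    // multiplier of 4 gives the 512 KiB blocking limit seen in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit (bytes): " + blockingLimit);
  }
}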
2024-12-13T21:31:43,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-13T21:31:43,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-13T21:31:43,645 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-13T21:31:43,645 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 999 msec 2024-12-13T21:31:43,646 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.0010 sec 2024-12-13T21:31:43,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-13T21:31:43,748 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-13T21:31:43,749 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:43,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-12-13T21:31:43,750 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:43,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-13T21:31:43,750 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:43,751 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:43,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-13T21:31:43,901 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:43,901 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-13T21:31:43,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:43,901 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-13T21:31:43,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:43,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:43,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:43,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:43,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:43,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:43,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/03f0a52fd8d54a70b924c2cd8206c5ba is 50, key is test_row_0/A:col10/1734125502904/Put/seqid=0 2024-12-13T21:31:43,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742153_1329 (size=12301) 2024-12-13T21:31:43,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:43,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. as already flushing 2024-12-13T21:31:43,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:43,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125563948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:43,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:43,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125563949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:43,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:43,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125563951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:44,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:44,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125564020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:44,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-13T21:31:44,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:44,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125564052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:44,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:44,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125564052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:44,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:44,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125564054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:44,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:44,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125564255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:44,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:44,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125564256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:44,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:44,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125564257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:44,309 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/03f0a52fd8d54a70b924c2cd8206c5ba 2024-12-13T21:31:44,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/16c04543531e4a1295f5d2b9e65c27a4 is 50, key is test_row_0/B:col10/1734125502904/Put/seqid=0 2024-12-13T21:31:44,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742154_1330 (size=12301) 2024-12-13T21:31:44,324 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/16c04543531e4a1295f5d2b9e65c27a4 2024-12-13T21:31:44,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/bf06113fa5e14cc4be601630a73456ff is 50, key is test_row_0/C:col10/1734125502904/Put/seqid=0 2024-12-13T21:31:44,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742155_1331 (size=12301) 2024-12-13T21:31:44,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-13T21:31:44,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:44,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125564559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:44,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:44,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125564559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:44,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:44,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125564560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:44,733 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/bf06113fa5e14cc4be601630a73456ff 2024-12-13T21:31:44,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/03f0a52fd8d54a70b924c2cd8206c5ba as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/03f0a52fd8d54a70b924c2cd8206c5ba 2024-12-13T21:31:44,741 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/03f0a52fd8d54a70b924c2cd8206c5ba, entries=150, sequenceid=355, filesize=12.0 K 2024-12-13T21:31:44,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/16c04543531e4a1295f5d2b9e65c27a4 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/16c04543531e4a1295f5d2b9e65c27a4 2024-12-13T21:31:44,747 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/16c04543531e4a1295f5d2b9e65c27a4, entries=150, sequenceid=355, filesize=12.0 K 2024-12-13T21:31:44,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/bf06113fa5e14cc4be601630a73456ff as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bf06113fa5e14cc4be601630a73456ff 2024-12-13T21:31:44,753 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bf06113fa5e14cc4be601630a73456ff, entries=150, sequenceid=355, filesize=12.0 K 2024-12-13T21:31:44,755 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 853ms, sequenceid=355, compaction requested=true 2024-12-13T21:31:44,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:44,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:44,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-13T21:31:44,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-13T21:31:44,761 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-13T21:31:44,761 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0060 sec 2024-12-13T21:31:44,762 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.0120 sec 2024-12-13T21:31:44,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-13T21:31:44,853 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-13T21:31:44,854 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:44,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-12-13T21:31:44,856 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:44,856 INFO 
[PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:44,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:44,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-13T21:31:44,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-13T21:31:45,008 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-13T21:31:45,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:45,009 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-13T21:31:45,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:45,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:45,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:45,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:45,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:45,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:45,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/18e2b3a470bd4a0ca8cefe9818017ccc is 50, key is test_row_0/A:col10/1734125503948/Put/seqid=0 2024-12-13T21:31:45,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
as already flushing 2024-12-13T21:31:45,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:45,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742156_1332 (size=12301) 2024-12-13T21:31:45,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125565089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125565089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125565090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125565099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-13T21:31:45,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125565213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125565213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125565215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125565216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125565415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125565416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125565420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125565420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,445 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/18e2b3a470bd4a0ca8cefe9818017ccc 2024-12-13T21:31:45,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/a5be1f9d863b446c80d5e54dbba2794a is 50, key is test_row_0/B:col10/1734125503948/Put/seqid=0 2024-12-13T21:31:45,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742157_1333 (size=12301) 2024-12-13T21:31:45,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-13T21:31:45,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125565720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125565726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125565728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,732 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125565730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:45,792 DEBUG [Thread-1211 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1df84068 to 127.0.0.1:57927 2024-12-13T21:31:45,792 DEBUG [Thread-1211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:45,793 DEBUG [Thread-1215 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60cea876 to 127.0.0.1:57927 2024-12-13T21:31:45,793 DEBUG [Thread-1215 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:45,795 DEBUG [Thread-1213 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x644774bd to 127.0.0.1:57927 2024-12-13T21:31:45,795 DEBUG [Thread-1213 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:45,795 DEBUG [Thread-1209 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3e5c7476 to 127.0.0.1:57927 2024-12-13T21:31:45,795 DEBUG [Thread-1209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:45,796 DEBUG [Thread-1217 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10011701 to 127.0.0.1:57927 2024-12-13T21:31:45,796 DEBUG [Thread-1217 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:45,857 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/a5be1f9d863b446c80d5e54dbba2794a 2024-12-13T21:31:45,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/93f7a839483848adaffa78f95056d70c is 50, key is test_row_0/C:col10/1734125503948/Put/seqid=0 2024-12-13T21:31:45,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742158_1334 (size=12301) 2024-12-13T21:31:45,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-13T21:31:46,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:46,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42274 deadline: 1734125566226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:46,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:46,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42326 deadline: 1734125566232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:46,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:46,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42272 deadline: 1734125566234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:46,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:46,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:42358 deadline: 1734125566235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:46,269 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/93f7a839483848adaffa78f95056d70c 2024-12-13T21:31:46,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/18e2b3a470bd4a0ca8cefe9818017ccc as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/18e2b3a470bd4a0ca8cefe9818017ccc 2024-12-13T21:31:46,278 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/18e2b3a470bd4a0ca8cefe9818017ccc, entries=150, sequenceid=364, filesize=12.0 K 2024-12-13T21:31:46,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/a5be1f9d863b446c80d5e54dbba2794a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/a5be1f9d863b446c80d5e54dbba2794a 2024-12-13T21:31:46,282 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/a5be1f9d863b446c80d5e54dbba2794a, entries=150, sequenceid=364, filesize=12.0 K 2024-12-13T21:31:46,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/93f7a839483848adaffa78f95056d70c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/93f7a839483848adaffa78f95056d70c 2024-12-13T21:31:46,286 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/93f7a839483848adaffa78f95056d70c, entries=150, sequenceid=364, filesize=12.0 K 2024-12-13T21:31:46,287 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 1278ms, sequenceid=364, compaction requested=true 2024-12-13T21:31:46,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:46,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:46,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-12-13T21:31:46,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-12-13T21:31:46,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-13T21:31:46,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4320 sec 2024-12-13T21:31:46,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 1.4350 sec 2024-12-13T21:31:46,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-13T21:31:46,961 INFO [Thread-1208 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-12-13T21:31:47,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:47,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-13T21:31:47,237 DEBUG [Thread-1204 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f50b381 to 127.0.0.1:57927 2024-12-13T21:31:47,237 DEBUG [Thread-1204 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:47,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:47,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:47,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:47,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:47,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:47,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:47,238 DEBUG [Thread-1206 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x124edab0 to 127.0.0.1:57927 2024-12-13T21:31:47,238 DEBUG [Thread-1206 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:47,240 DEBUG [Thread-1200 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e7fc60d to 127.0.0.1:57927 2024-12-13T21:31:47,240 DEBUG [Thread-1200 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:47,242 DEBUG [Thread-1202 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e66ea50 to 127.0.0.1:57927 2024-12-13T21:31:47,242 DEBUG [Thread-1202 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:47,242 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/260abc3227d043af965bd894cdec0083 is 50, key is test_row_0/A:col10/1734125505075/Put/seqid=0 2024-12-13T21:31:47,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742159_1335 (size=12301) 2024-12-13T21:31:47,354 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T21:31:47,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/260abc3227d043af965bd894cdec0083 2024-12-13T21:31:47,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/fdf439dc10b8467195c58eb91b7ca849 is 50, key is test_row_0/B:col10/1734125505075/Put/seqid=0 2024-12-13T21:31:47,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742160_1336 (size=12301) 2024-12-13T21:31:48,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/fdf439dc10b8467195c58eb91b7ca849 2024-12-13T21:31:48,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/bdb9fa21ecf4424681a917637e82a5f9 is 50, key is test_row_0/C:col10/1734125505075/Put/seqid=0 2024-12-13T21:31:48,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742161_1337 (size=12301) 2024-12-13T21:31:48,470 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/bdb9fa21ecf4424681a917637e82a5f9 2024-12-13T21:31:48,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/260abc3227d043af965bd894cdec0083 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/260abc3227d043af965bd894cdec0083 2024-12-13T21:31:48,484 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/260abc3227d043af965bd894cdec0083, entries=150, sequenceid=394, filesize=12.0 K 2024-12-13T21:31:48,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/fdf439dc10b8467195c58eb91b7ca849 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/fdf439dc10b8467195c58eb91b7ca849 2024-12-13T21:31:48,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/fdf439dc10b8467195c58eb91b7ca849, entries=150, sequenceid=394, filesize=12.0 K 2024-12-13T21:31:48,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/bdb9fa21ecf4424681a917637e82a5f9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bdb9fa21ecf4424681a917637e82a5f9 2024-12-13T21:31:48,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bdb9fa21ecf4424681a917637e82a5f9, entries=150, sequenceid=394, filesize=12.0 K 2024-12-13T21:31:48,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=13.42 KB/13740 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 1261ms, sequenceid=394, compaction requested=true 2024-12-13T21:31:48,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:48,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:31:48,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:48,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:48,498 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 0 compacting, 7 eligible, 16 blocking 2024-12-13T21:31:48,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:48,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c1b6e03dacebdc2f9aa13c07eb1be8e:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:48,498 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:48,498 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 0 compacting, 7 eligible, 16 blocking 2024-12-13T21:31:48,500 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 7 files of size 86721 starting at candidate #0 after considering 15 permutations with 15 in ratio 2024-12-13T21:31:48,500 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 7 files of size 86721 starting at candidate #0 after considering 15 permutations with 15 in ratio 2024-12-13T21:31:48,500 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/A is initiating minor compaction (all files) 2024-12-13T21:31:48,500 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/B is initiating minor compaction (all files) 2024-12-13T21:31:48,500 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/B in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:48,500 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/A in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:48,500 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/bf340b43c5df4d1aaf2ee194d4c3a221, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/38e75dd213fd496db811cc3fc99ed784, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/8b2cbab25f11477d8fe1e47f0c6ddadb, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/5e67ddf4887e4fc4bb2a08749732c57d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/16c04543531e4a1295f5d2b9e65c27a4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/a5be1f9d863b446c80d5e54dbba2794a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/fdf439dc10b8467195c58eb91b7ca849] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=84.7 K 2024-12-13T21:31:48,500 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of 
[hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/abe9efdb1bb7487db0d20ca0e40dae5d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/2fcafe47c0fd4d9caab2658eb9b4dd1f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/d4b310061d2b429a9e5f5a4ed0496c2a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/f4d336b2b30949be97af9397e68d9d32, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/03f0a52fd8d54a70b924c2cd8206c5ba, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/18e2b3a470bd4a0ca8cefe9818017ccc, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/260abc3227d043af965bd894cdec0083] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=84.7 K 2024-12-13T21:31:48,500 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting bf340b43c5df4d1aaf2ee194d4c3a221, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734125498417 2024-12-13T21:31:48,500 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting abe9efdb1bb7487db0d20ca0e40dae5d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734125498417 2024-12-13T21:31:48,500 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 38e75dd213fd496db811cc3fc99ed784, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734125499541 2024-12-13T21:31:48,500 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2fcafe47c0fd4d9caab2658eb9b4dd1f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734125499541 2024-12-13T21:31:48,501 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4b310061d2b429a9e5f5a4ed0496c2a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1734125500693 2024-12-13T21:31:48,501 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b2cbab25f11477d8fe1e47f0c6ddadb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1734125500693 2024-12-13T21:31:48,501 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e67ddf4887e4fc4bb2a08749732c57d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1734125501819 2024-12-13T21:31:48,501 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4d336b2b30949be97af9397e68d9d32, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=328, earliestPutTs=1734125501819 2024-12-13T21:31:48,501 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 16c04543531e4a1295f5d2b9e65c27a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1734125502896 2024-12-13T21:31:48,501 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03f0a52fd8d54a70b924c2cd8206c5ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1734125502896 2024-12-13T21:31:48,501 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a5be1f9d863b446c80d5e54dbba2794a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1734125503947 2024-12-13T21:31:48,501 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18e2b3a470bd4a0ca8cefe9818017ccc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1734125503947 2024-12-13T21:31:48,502 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting fdf439dc10b8467195c58eb91b7ca849, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1734125505075 2024-12-13T21:31:48,502 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 260abc3227d043af965bd894cdec0083, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1734125505075 2024-12-13T21:31:48,516 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#A#compaction#288 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:48,516 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#B#compaction#287 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:48,516 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/6863fcc313ea454bb63725cbbed68efc is 50, key is test_row_0/A:col10/1734125505075/Put/seqid=0 2024-12-13T21:31:48,516 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/8271be5b21e24adf8f997e8a4a3f0f52 is 50, key is test_row_0/B:col10/1734125505075/Put/seqid=0 2024-12-13T21:31:48,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742163_1339 (size=13153) 2024-12-13T21:31:48,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742162_1338 (size=13153) 2024-12-13T21:31:48,928 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/8271be5b21e24adf8f997e8a4a3f0f52 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/8271be5b21e24adf8f997e8a4a3f0f52 2024-12-13T21:31:48,928 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/6863fcc313ea454bb63725cbbed68efc as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6863fcc313ea454bb63725cbbed68efc 2024-12-13T21:31:48,932 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 7 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/B of 3c1b6e03dacebdc2f9aa13c07eb1be8e into 8271be5b21e24adf8f997e8a4a3f0f52(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:48,932 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 7 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/A of 3c1b6e03dacebdc2f9aa13c07eb1be8e into 6863fcc313ea454bb63725cbbed68efc(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:48,932 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:48,932 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:48,932 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/B, priority=9, startTime=1734125508498; duration=0sec 2024-12-13T21:31:48,932 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/A, priority=9, startTime=1734125508498; duration=0sec 2024-12-13T21:31:48,932 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:48,932 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:B 2024-12-13T21:31:48,932 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:48,932 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 0 compacting, 7 eligible, 16 blocking 2024-12-13T21:31:48,932 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:A 2024-12-13T21:31:48,934 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 7 files of size 86721 starting at candidate #0 after considering 15 permutations with 15 in ratio 2024-12-13T21:31:48,934 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 3c1b6e03dacebdc2f9aa13c07eb1be8e/C is initiating minor compaction (all files) 2024-12-13T21:31:48,934 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c1b6e03dacebdc2f9aa13c07eb1be8e/C in TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:48,934 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3eac5c23afc644d9b27a184a6647ae1f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3ae7a5d2a06d4f73a7918efb6802ced3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/07922e353caa4e4593199a32d2e526a5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/528e7ef899904861923738ee62172e29, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bf06113fa5e14cc4be601630a73456ff, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/93f7a839483848adaffa78f95056d70c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bdb9fa21ecf4424681a917637e82a5f9] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp, totalSize=84.7 K 2024-12-13T21:31:48,934 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3eac5c23afc644d9b27a184a6647ae1f, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734125498417 2024-12-13T21:31:48,934 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ae7a5d2a06d4f73a7918efb6802ced3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734125499541 2024-12-13T21:31:48,935 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 07922e353caa4e4593199a32d2e526a5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1734125500693 2024-12-13T21:31:48,935 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 528e7ef899904861923738ee62172e29, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1734125501819 2024-12-13T21:31:48,935 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting bf06113fa5e14cc4be601630a73456ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1734125502896 2024-12-13T21:31:48,935 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 93f7a839483848adaffa78f95056d70c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1734125503947 2024-12-13T21:31:48,936 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting bdb9fa21ecf4424681a917637e82a5f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1734125505075 2024-12-13T21:31:48,947 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 3c1b6e03dacebdc2f9aa13c07eb1be8e#C#compaction#289 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:48,948 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/605402d8601c4eb981ba19cbba715dfc is 50, key is test_row_0/C:col10/1734125505075/Put/seqid=0 2024-12-13T21:31:48,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742164_1340 (size=13153) 2024-12-13T21:31:49,359 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/605402d8601c4eb981ba19cbba715dfc as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/605402d8601c4eb981ba19cbba715dfc 2024-12-13T21:31:49,366 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 7 (all) file(s) in 3c1b6e03dacebdc2f9aa13c07eb1be8e/C of 3c1b6e03dacebdc2f9aa13c07eb1be8e into 605402d8601c4eb981ba19cbba715dfc(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:49,366 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:49,366 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e., storeName=3c1b6e03dacebdc2f9aa13c07eb1be8e/C, priority=9, startTime=1734125508498; duration=0sec 2024-12-13T21:31:49,366 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:49,366 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c1b6e03dacebdc2f9aa13c07eb1be8e:C 2024-12-13T21:31:50,821 DEBUG [Thread-1198 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x45426917 to 127.0.0.1:57927 2024-12-13T21:31:50,821 DEBUG [Thread-1198 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:50,821 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-13T21:31:50,821 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 26 2024-12-13T21:31:50,821 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 91 2024-12-13T21:31:50,821 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-12-13T21:31:50,821 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69 2024-12-13T21:31:50,821 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-12-13T21:31:50,822 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-13T21:31:50,822 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7542 2024-12-13T21:31:50,822 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7638 2024-12-13T21:31:50,822 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7307 2024-12-13T21:31:50,822 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7518 2024-12-13T21:31:50,822 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7671 2024-12-13T21:31:50,822 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-13T21:31:50,822 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-13T21:31:50,822 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0c8a18c7 to 127.0.0.1:57927 2024-12-13T21:31:50,822 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:31:50,822 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-13T21:31:50,822 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-12-13T21:31:50,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:50,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-13T21:31:50,824 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125510824"}]},"ts":"1734125510824"} 2024-12-13T21:31:50,825 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-13T21:31:50,881 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-13T21:31:50,882 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-13T21:31:50,883 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c1b6e03dacebdc2f9aa13c07eb1be8e, UNASSIGN}] 2024-12-13T21:31:50,884 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c1b6e03dacebdc2f9aa13c07eb1be8e, UNASSIGN 2024-12-13T21:31:50,884 INFO [PEWorker-3 {}] 
assignment.RegionStateStore(202): pid=95 updating hbase:meta row=3c1b6e03dacebdc2f9aa13c07eb1be8e, regionState=CLOSING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:31:50,885 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T21:31:50,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure 3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:31:50,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-13T21:31:51,037 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:51,037 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:51,038 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T21:31:51,038 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 3c1b6e03dacebdc2f9aa13c07eb1be8e, disabling compactions & flushes 2024-12-13T21:31:51,038 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:51,038 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 2024-12-13T21:31:51,038 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. after waiting 0 ms 2024-12-13T21:31:51,038 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
2024-12-13T21:31:51,038 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(2837): Flushing 3c1b6e03dacebdc2f9aa13c07eb1be8e 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-13T21:31:51,038 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=A 2024-12-13T21:31:51,038 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:51,038 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=B 2024-12-13T21:31:51,038 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:51,038 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c1b6e03dacebdc2f9aa13c07eb1be8e, store=C 2024-12-13T21:31:51,038 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:51,043 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/6ff0104763ce4dedbb43b5fcc8e7b6f6 is 50, key is test_row_0/A:col10/1734125507241/Put/seqid=0 2024-12-13T21:31:51,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742165_1341 (size=9857) 2024-12-13T21:31:51,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-13T21:31:51,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-13T21:31:51,450 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/6ff0104763ce4dedbb43b5fcc8e7b6f6 2024-12-13T21:31:51,462 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/037bd719811542408c8b1b6d264e2aad is 50, key is test_row_0/B:col10/1734125507241/Put/seqid=0 2024-12-13T21:31:51,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742166_1342 (size=9857) 2024-12-13T21:31:51,868 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 
{event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/037bd719811542408c8b1b6d264e2aad 2024-12-13T21:31:51,880 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/d023943cce174363ae67c093ccd1f8cf is 50, key is test_row_0/C:col10/1734125507241/Put/seqid=0 2024-12-13T21:31:51,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742167_1343 (size=9857) 2024-12-13T21:31:51,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-13T21:31:52,286 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/d023943cce174363ae67c093ccd1f8cf 2024-12-13T21:31:52,290 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/A/6ff0104763ce4dedbb43b5fcc8e7b6f6 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6ff0104763ce4dedbb43b5fcc8e7b6f6 2024-12-13T21:31:52,292 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6ff0104763ce4dedbb43b5fcc8e7b6f6, entries=100, sequenceid=403, filesize=9.6 K 2024-12-13T21:31:52,293 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/B/037bd719811542408c8b1b6d264e2aad as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/037bd719811542408c8b1b6d264e2aad 2024-12-13T21:31:52,297 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/037bd719811542408c8b1b6d264e2aad, entries=100, sequenceid=403, filesize=9.6 K 2024-12-13T21:31:52,298 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/.tmp/C/d023943cce174363ae67c093ccd1f8cf as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/d023943cce174363ae67c093ccd1f8cf 2024-12-13T21:31:52,301 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/d023943cce174363ae67c093ccd1f8cf, entries=100, sequenceid=403, filesize=9.6 K 2024-12-13T21:31:52,302 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 3c1b6e03dacebdc2f9aa13c07eb1be8e in 1264ms, sequenceid=403, compaction requested=false 2024-12-13T21:31:52,302 DEBUG [StoreCloser-TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/09d0b5f1303e4bcf9ca735223d96c5da, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8f26ae181c454db2b04155d3e9b5bdce, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/070ada7b9c644d16afe85b0f859b537c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/c9453471af4e43bd95456dad04ce9f6f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8d8dba3631bc4540bd0d4bf85d8ede36, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6ad226e229534826bf599084ce52386a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/366b0965ea7741f782788ed9c788ece5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/0749edbb26fa47df9d7c43496ecc1e29, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/038c7d3b81964c88b53a7d73553fff9d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/604ca1af6efd431cba4dd078efa0b0b9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/2d2ed0d4d09547a2b6eb4aa7cab12ad7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/10e63c1f0e954b7cb42ad36374736d85, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/84e4255ce2c64492b2a2ab4358d17d89, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/d6270e4e63ac4f9eb133611c1724e953, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/5edb26488621406d9ad37eb5b0f10b7d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8cbf36b4885240a4a27f09e8178ac289, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8970dbdfd91545869bf5e4780aac1393, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/abe9efdb1bb7487db0d20ca0e40dae5d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/edf034d2c7ca4b36965541d4a80dbf05, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/2fcafe47c0fd4d9caab2658eb9b4dd1f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/d4b310061d2b429a9e5f5a4ed0496c2a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/f4d336b2b30949be97af9397e68d9d32, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/03f0a52fd8d54a70b924c2cd8206c5ba, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/18e2b3a470bd4a0ca8cefe9818017ccc, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/260abc3227d043af965bd894cdec0083] to archive 2024-12-13T21:31:52,303 DEBUG [StoreCloser-TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
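
The HFileArchiver records that follow all apply the same path rewrite: a compacted store file under <rootdir>/data/<namespace>/<table>/<region>/<family>/ is moved to the mirrored location under <rootdir>/archive/data/..., keeping its file name. The sketch below only illustrates that mapping with Hadoop's FileSystem API; it is not HBase's HFileArchiver, which additionally handles name collisions, retries and cleanup policy. The paths are copied from the log; the class and method names are made up for the example.

    import java.io.IOException;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {

      // Map <root>/data/<ns>/<table>/<region>/<cf>/<file> to <root>/archive/data/...
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String rootPrefix = rootDir.toString() + "/data/";
        String full = storeFile.toString();
        if (!full.startsWith(rootPrefix)) {
          throw new IllegalArgumentException("Not under " + rootPrefix + ": " + storeFile);
        }
        String relative = full.substring(rootPrefix.length());
        return new Path(rootDir, "archive/data/" + relative);
      }

      static void archive(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        Path target = toArchivePath(rootDir, storeFile);
        fs.mkdirs(target.getParent());          // ensure archive/<...>/<cf>/ exists
        if (!fs.rename(storeFile, target)) {    // a plain rename; HBase's real logic is richer
          throw new IOException("Failed to archive " + storeFile + " to " + target);
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path rootDir = new Path(
            "hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05");
        FileSystem fs = FileSystem.get(URI.create(rootDir.toString()), conf);
        Path storeFile = new Path(rootDir,
            "data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6ad226e229534826bf599084ce52386a");
        System.out.println(toArchivePath(rootDir, storeFile));
        // archive(fs, rootDir, storeFile);  // would move the file, as the log records show
      }
    }
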
2024-12-13T21:31:52,308 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6ad226e229534826bf599084ce52386a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6ad226e229534826bf599084ce52386a 2024-12-13T21:31:52,308 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/09d0b5f1303e4bcf9ca735223d96c5da to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/09d0b5f1303e4bcf9ca735223d96c5da 2024-12-13T21:31:52,308 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/070ada7b9c644d16afe85b0f859b537c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/070ada7b9c644d16afe85b0f859b537c 2024-12-13T21:31:52,308 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/c9453471af4e43bd95456dad04ce9f6f to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/c9453471af4e43bd95456dad04ce9f6f 2024-12-13T21:31:52,308 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/0749edbb26fa47df9d7c43496ecc1e29 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/0749edbb26fa47df9d7c43496ecc1e29 2024-12-13T21:31:52,308 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8f26ae181c454db2b04155d3e9b5bdce to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8f26ae181c454db2b04155d3e9b5bdce 2024-12-13T21:31:52,308 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8d8dba3631bc4540bd0d4bf85d8ede36 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8d8dba3631bc4540bd0d4bf85d8ede36 2024-12-13T21:31:52,308 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/366b0965ea7741f782788ed9c788ece5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/366b0965ea7741f782788ed9c788ece5 2024-12-13T21:31:52,309 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/604ca1af6efd431cba4dd078efa0b0b9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/604ca1af6efd431cba4dd078efa0b0b9 2024-12-13T21:31:52,309 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/10e63c1f0e954b7cb42ad36374736d85 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/10e63c1f0e954b7cb42ad36374736d85 2024-12-13T21:31:52,309 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/038c7d3b81964c88b53a7d73553fff9d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/038c7d3b81964c88b53a7d73553fff9d 2024-12-13T21:31:52,309 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/2d2ed0d4d09547a2b6eb4aa7cab12ad7 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/2d2ed0d4d09547a2b6eb4aa7cab12ad7 2024-12-13T21:31:52,310 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/84e4255ce2c64492b2a2ab4358d17d89 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/84e4255ce2c64492b2a2ab4358d17d89 2024-12-13T21:31:52,310 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/d6270e4e63ac4f9eb133611c1724e953 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/d6270e4e63ac4f9eb133611c1724e953 2024-12-13T21:31:52,310 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/5edb26488621406d9ad37eb5b0f10b7d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/5edb26488621406d9ad37eb5b0f10b7d 2024-12-13T21:31:52,310 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8cbf36b4885240a4a27f09e8178ac289 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8cbf36b4885240a4a27f09e8178ac289 2024-12-13T21:31:52,311 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8970dbdfd91545869bf5e4780aac1393 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/8970dbdfd91545869bf5e4780aac1393 2024-12-13T21:31:52,311 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/edf034d2c7ca4b36965541d4a80dbf05 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/edf034d2c7ca4b36965541d4a80dbf05 2024-12-13T21:31:52,311 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/2fcafe47c0fd4d9caab2658eb9b4dd1f to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/2fcafe47c0fd4d9caab2658eb9b4dd1f 2024-12-13T21:31:52,311 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/d4b310061d2b429a9e5f5a4ed0496c2a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/d4b310061d2b429a9e5f5a4ed0496c2a 2024-12-13T21:31:52,312 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/abe9efdb1bb7487db0d20ca0e40dae5d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/abe9efdb1bb7487db0d20ca0e40dae5d 2024-12-13T21:31:52,312 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/f4d336b2b30949be97af9397e68d9d32 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/f4d336b2b30949be97af9397e68d9d32 2024-12-13T21:31:52,312 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/18e2b3a470bd4a0ca8cefe9818017ccc to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/18e2b3a470bd4a0ca8cefe9818017ccc 2024-12-13T21:31:52,312 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/03f0a52fd8d54a70b924c2cd8206c5ba to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/03f0a52fd8d54a70b924c2cd8206c5ba 2024-12-13T21:31:52,312 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/260abc3227d043af965bd894cdec0083 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/260abc3227d043af965bd894cdec0083 2024-12-13T21:31:52,313 DEBUG [StoreCloser-TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/678775fbd8d54c6db79d0d542b92d951, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/07c8002855934488b43dff5b49eaca7e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/9ac176cab09f4d9ebb52d28306541910, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/a3f904c7eaf847b584105502ce1d8f82, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/1907deb90e0e462b8bddb379dd39fc46, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/4438af6778414d2293ff73c133b6cf71, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/c93b31c2c1214dbe96ee6e03c0b5974b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/2a03a123e62b49ce8ba13b18567101c1, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/0bf7b33121334c8ebeb4d886de04af39, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/9ce9c513510849939e797d0370b56cbf, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/fc84750107974314a2fa4a114e24eac3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/b7b89913c120470ea4a87db6017c23e8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/273538143b2e4085b1a9bf43b3dbcfce, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/3777d48b623548aa9540a03cdf62026a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/65d178b22a8349dbb6c8bb0148958e0d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/b7d61b544f164985970c6ec83bdc9180, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/764e2638dd7142eda739625b369c2394, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/bf340b43c5df4d1aaf2ee194d4c3a221, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/2338e63cc52c43a880ea7258da5f2ac8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/38e75dd213fd496db811cc3fc99ed784, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/8b2cbab25f11477d8fe1e47f0c6ddadb, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/5e67ddf4887e4fc4bb2a08749732c57d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/16c04543531e4a1295f5d2b9e65c27a4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/a5be1f9d863b446c80d5e54dbba2794a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/fdf439dc10b8467195c58eb91b7ca849] to archive 2024-12-13T21:31:52,314 DEBUG [StoreCloser-TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-13T21:31:52,316 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/9ac176cab09f4d9ebb52d28306541910 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/9ac176cab09f4d9ebb52d28306541910 2024-12-13T21:31:52,316 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/4438af6778414d2293ff73c133b6cf71 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/4438af6778414d2293ff73c133b6cf71 2024-12-13T21:31:52,316 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/a3f904c7eaf847b584105502ce1d8f82 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/a3f904c7eaf847b584105502ce1d8f82 2024-12-13T21:31:52,316 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/2a03a123e62b49ce8ba13b18567101c1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/2a03a123e62b49ce8ba13b18567101c1 2024-12-13T21:31:52,316 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/678775fbd8d54c6db79d0d542b92d951 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/678775fbd8d54c6db79d0d542b92d951 2024-12-13T21:31:52,317 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/1907deb90e0e462b8bddb379dd39fc46 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/1907deb90e0e462b8bddb379dd39fc46 2024-12-13T21:31:52,317 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/c93b31c2c1214dbe96ee6e03c0b5974b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/c93b31c2c1214dbe96ee6e03c0b5974b 2024-12-13T21:31:52,317 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/07c8002855934488b43dff5b49eaca7e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/07c8002855934488b43dff5b49eaca7e 2024-12-13T21:31:52,318 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/9ce9c513510849939e797d0370b56cbf to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/9ce9c513510849939e797d0370b56cbf 2024-12-13T21:31:52,318 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/fc84750107974314a2fa4a114e24eac3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/fc84750107974314a2fa4a114e24eac3 2024-12-13T21:31:52,318 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/b7b89913c120470ea4a87db6017c23e8 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/b7b89913c120470ea4a87db6017c23e8 2024-12-13T21:31:52,318 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/0bf7b33121334c8ebeb4d886de04af39 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/0bf7b33121334c8ebeb4d886de04af39 2024-12-13T21:31:52,318 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/3777d48b623548aa9540a03cdf62026a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/3777d48b623548aa9540a03cdf62026a 2024-12-13T21:31:52,318 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/273538143b2e4085b1a9bf43b3dbcfce to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/273538143b2e4085b1a9bf43b3dbcfce 2024-12-13T21:31:52,319 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/65d178b22a8349dbb6c8bb0148958e0d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/65d178b22a8349dbb6c8bb0148958e0d 2024-12-13T21:31:52,319 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/b7d61b544f164985970c6ec83bdc9180 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/b7d61b544f164985970c6ec83bdc9180 2024-12-13T21:31:52,320 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/764e2638dd7142eda739625b369c2394 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/764e2638dd7142eda739625b369c2394 2024-12-13T21:31:52,320 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/2338e63cc52c43a880ea7258da5f2ac8 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/2338e63cc52c43a880ea7258da5f2ac8 2024-12-13T21:31:52,320 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/bf340b43c5df4d1aaf2ee194d4c3a221 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/bf340b43c5df4d1aaf2ee194d4c3a221 2024-12-13T21:31:52,320 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/38e75dd213fd496db811cc3fc99ed784 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/38e75dd213fd496db811cc3fc99ed784 2024-12-13T21:31:52,320 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/8b2cbab25f11477d8fe1e47f0c6ddadb to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/8b2cbab25f11477d8fe1e47f0c6ddadb 2024-12-13T21:31:52,320 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/5e67ddf4887e4fc4bb2a08749732c57d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/5e67ddf4887e4fc4bb2a08749732c57d 2024-12-13T21:31:52,320 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/16c04543531e4a1295f5d2b9e65c27a4 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/16c04543531e4a1295f5d2b9e65c27a4 2024-12-13T21:31:52,320 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/a5be1f9d863b446c80d5e54dbba2794a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/a5be1f9d863b446c80d5e54dbba2794a 2024-12-13T21:31:52,321 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/fdf439dc10b8467195c58eb91b7ca849 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/fdf439dc10b8467195c58eb91b7ca849 2024-12-13T21:31:52,322 DEBUG [StoreCloser-TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/5c391935def94feea88045cd0a12d0b8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/9f6a13cc515a4e91b76c3d29be03ed17, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/1ccf5d8d82ce4685bec7fa1ba248b085, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/640ae4cf66d346d3a8d4e2358761eb2a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/51df7499077c46a29781f837d39c167b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/492c11c8304c492689fd7d836cec07a6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/ce17dcd2bc8940348618adb9084e98a5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bfc8afe5524f4a318a3e04ef8299d9df, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/537206e16ddc4463b7c41b29146f4677, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/6ec4a926873544cca888c98e4041c919, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/7308b73c4c884dcbbcf22e5a93d39600, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/02621a6bc36c46199999dcd187f46dd9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/2cfe4d28e96744c680347871e590e892, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/5b13cedfb18348208b3d858c32bbbc07, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/73bd314753cf41758b8d24a3192cace4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bb08f3ce2602485194a76df949490fe9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3cde268561624024b56954b8bccba7b5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3eac5c23afc644d9b27a184a6647ae1f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/31e2c38a6154484989beeac6d916a713, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3ae7a5d2a06d4f73a7918efb6802ced3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/07922e353caa4e4593199a32d2e526a5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/528e7ef899904861923738ee62172e29, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bf06113fa5e14cc4be601630a73456ff, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/93f7a839483848adaffa78f95056d70c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bdb9fa21ecf4424681a917637e82a5f9] to archive 2024-12-13T21:31:52,323 DEBUG [StoreCloser-TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-13T21:31:52,325 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/5c391935def94feea88045cd0a12d0b8 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/5c391935def94feea88045cd0a12d0b8 2024-12-13T21:31:52,325 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/1ccf5d8d82ce4685bec7fa1ba248b085 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/1ccf5d8d82ce4685bec7fa1ba248b085 2024-12-13T21:31:52,325 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/640ae4cf66d346d3a8d4e2358761eb2a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/640ae4cf66d346d3a8d4e2358761eb2a 2024-12-13T21:31:52,325 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bfc8afe5524f4a318a3e04ef8299d9df to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bfc8afe5524f4a318a3e04ef8299d9df 2024-12-13T21:31:52,325 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/ce17dcd2bc8940348618adb9084e98a5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/ce17dcd2bc8940348618adb9084e98a5 2024-12-13T21:31:52,325 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/9f6a13cc515a4e91b76c3d29be03ed17 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/9f6a13cc515a4e91b76c3d29be03ed17 2024-12-13T21:31:52,325 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/492c11c8304c492689fd7d836cec07a6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/492c11c8304c492689fd7d836cec07a6 2024-12-13T21:31:52,325 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/51df7499077c46a29781f837d39c167b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/51df7499077c46a29781f837d39c167b 2024-12-13T21:31:52,327 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/6ec4a926873544cca888c98e4041c919 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/6ec4a926873544cca888c98e4041c919 2024-12-13T21:31:52,327 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/02621a6bc36c46199999dcd187f46dd9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/02621a6bc36c46199999dcd187f46dd9 2024-12-13T21:31:52,327 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/5b13cedfb18348208b3d858c32bbbc07 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/5b13cedfb18348208b3d858c32bbbc07 2024-12-13T21:31:52,327 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/7308b73c4c884dcbbcf22e5a93d39600 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/7308b73c4c884dcbbcf22e5a93d39600 2024-12-13T21:31:52,327 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/73bd314753cf41758b8d24a3192cace4 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/73bd314753cf41758b8d24a3192cace4 2024-12-13T21:31:52,327 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/537206e16ddc4463b7c41b29146f4677 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/537206e16ddc4463b7c41b29146f4677 2024-12-13T21:31:52,327 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bb08f3ce2602485194a76df949490fe9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bb08f3ce2602485194a76df949490fe9 2024-12-13T21:31:52,327 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/2cfe4d28e96744c680347871e590e892 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/2cfe4d28e96744c680347871e590e892 2024-12-13T21:31:52,328 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3cde268561624024b56954b8bccba7b5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3cde268561624024b56954b8bccba7b5 2024-12-13T21:31:52,329 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3eac5c23afc644d9b27a184a6647ae1f to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3eac5c23afc644d9b27a184a6647ae1f 2024-12-13T21:31:52,329 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/528e7ef899904861923738ee62172e29 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/528e7ef899904861923738ee62172e29 2024-12-13T21:31:52,329 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/07922e353caa4e4593199a32d2e526a5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/07922e353caa4e4593199a32d2e526a5 2024-12-13T21:31:52,329 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3ae7a5d2a06d4f73a7918efb6802ced3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/3ae7a5d2a06d4f73a7918efb6802ced3 2024-12-13T21:31:52,329 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bf06113fa5e14cc4be601630a73456ff to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bf06113fa5e14cc4be601630a73456ff 2024-12-13T21:31:52,329 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/31e2c38a6154484989beeac6d916a713 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/31e2c38a6154484989beeac6d916a713 2024-12-13T21:31:52,329 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/93f7a839483848adaffa78f95056d70c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/93f7a839483848adaffa78f95056d70c 2024-12-13T21:31:52,330 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bdb9fa21ecf4424681a917637e82a5f9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/bdb9fa21ecf4424681a917637e82a5f9 2024-12-13T21:31:52,334 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/recovered.edits/406.seqid, newMaxSeqId=406, maxSeqId=1 2024-12-13T21:31:52,335 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e. 
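Annotation: the HFileArchiver entries above move every store file of region 3c1b6e03dacebdc2f9aa13c07eb1be8e from the table's data/ tree to the mirrored archive/ tree before the region closes. Below is a minimal sketch of that path mapping using only the plain Hadoop FileSystem API; it illustrates the layout, it is not HBase's HFileArchiver implementation. The class name is hypothetical and the root path and file name are copied from the log purely as examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustration of the data/ -> archive/data/ layout seen in the log; not HBase's own code.
public class ArchiveLayoutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path root = new Path("hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05");
    String rel = "data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/51df7499077c46a29781f837d39c167b";
    Path src = new Path(root, rel);
    Path dst = new Path(root, "archive/" + rel);   // the archive tree mirrors the data tree
    FileSystem fs = src.getFileSystem(conf);
    fs.mkdirs(dst.getParent());                    // create archive/.../C/ if it does not exist yet
    boolean moved = fs.rename(src, dst);           // archiving here is a rename within the same filesystem
    System.out.println(moved ? "archived to " + dst : "rename failed");
  }
}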
2024-12-13T21:31:52,335 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 3c1b6e03dacebdc2f9aa13c07eb1be8e: 2024-12-13T21:31:52,336 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:52,337 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=3c1b6e03dacebdc2f9aa13c07eb1be8e, regionState=CLOSED 2024-12-13T21:31:52,339 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-13T21:31:52,339 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure 3c1b6e03dacebdc2f9aa13c07eb1be8e, server=fd052dae32be,38989,1734125418878 in 1.4530 sec 2024-12-13T21:31:52,340 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-12-13T21:31:52,340 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c1b6e03dacebdc2f9aa13c07eb1be8e, UNASSIGN in 1.4560 sec 2024-12-13T21:31:52,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-13T21:31:52,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4590 sec 2024-12-13T21:31:52,343 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125512342"}]},"ts":"1734125512342"} 2024-12-13T21:31:52,344 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-13T21:31:52,356 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-13T21:31:52,357 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5350 sec 2024-12-13T21:31:52,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-13T21:31:52,930 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-12-13T21:31:52,930 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-12-13T21:31:52,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:52,932 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:52,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-13T21:31:52,932 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=97, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:52,933 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:52,935 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/recovered.edits] 2024-12-13T21:31:52,937 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6863fcc313ea454bb63725cbbed68efc to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6863fcc313ea454bb63725cbbed68efc 2024-12-13T21:31:52,937 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6ff0104763ce4dedbb43b5fcc8e7b6f6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/A/6ff0104763ce4dedbb43b5fcc8e7b6f6 2024-12-13T21:31:52,939 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/037bd719811542408c8b1b6d264e2aad to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/037bd719811542408c8b1b6d264e2aad 2024-12-13T21:31:52,940 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/8271be5b21e24adf8f997e8a4a3f0f52 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/B/8271be5b21e24adf8f997e8a4a3f0f52 2024-12-13T21:31:52,942 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/605402d8601c4eb981ba19cbba715dfc to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/605402d8601c4eb981ba19cbba715dfc 2024-12-13T21:31:52,943 
DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/d023943cce174363ae67c093ccd1f8cf to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/C/d023943cce174363ae67c093ccd1f8cf 2024-12-13T21:31:52,945 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/recovered.edits/406.seqid to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e/recovered.edits/406.seqid 2024-12-13T21:31:52,946 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/3c1b6e03dacebdc2f9aa13c07eb1be8e 2024-12-13T21:31:52,946 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-13T21:31:52,948 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=97, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:52,953 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-13T21:31:52,956 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-13T21:31:52,957 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=97, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:52,957 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-13T21:31:52,957 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734125512957"}]},"ts":"9223372036854775807"} 2024-12-13T21:31:52,959 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-13T21:31:52,959 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3c1b6e03dacebdc2f9aa13c07eb1be8e, NAME => 'TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e.', STARTKEY => '', ENDKEY => ''}] 2024-12-13T21:31:52,959 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
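Annotation: the DeleteTableProcedure above removes two kinds of hbase:meta rows, the region row keyed "TestAcidGuarantees,,1734125483542.3c1b6e03dacebdc2f9aa13c07eb1be8e." and the table-state row keyed "TestAcidGuarantees". A small, hedged sketch of listing such rows with the ordinary client API follows; hbase:meta is a normal table from the client's point of view, the class name is hypothetical, and the prefix scan (which would also match any other table name starting with the same string) is only an illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Lists the hbase:meta rows whose key starts with the table name; illustration only.
public class MetaRowsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.valueOf("hbase:meta"))) {
      Scan scan = new Scan().setRowPrefixFilter(Bytes.toBytes("TestAcidGuarantees"));
      try (ResultScanner rows = meta.getScanner(scan)) {
        for (Result r : rows) {
          System.out.println(Bytes.toStringBinary(r.getRow())); // region row and table-state row
        }
      }
    }
  }
}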
2024-12-13T21:31:52,959 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734125512959"}]},"ts":"9223372036854775807"} 2024-12-13T21:31:52,964 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-13T21:31:52,973 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=97, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:52,974 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 43 msec 2024-12-13T21:31:53,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-13T21:31:53,033 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-12-13T21:31:53,042 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=247 (was 248), OpenFileDescriptor=461 (was 465), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=444 (was 397) - SystemLoadAverage LEAK? -, ProcessCount=12 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=3608 (was 2635) - AvailableMemoryMB LEAK? - 2024-12-13T21:31:53,049 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=247, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=444, ProcessCount=12, AvailableMemoryMB=3607 2024-12-13T21:31:53,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
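Annotation: on the client side, the DISABLE (procId 93) and DELETE (procId 97) operations reported as completed above correspond to two blocking Admin calls. A minimal, hedged sketch of that sequence with the HBase 2.x client API; the class name is hypothetical and only the table name is taken from the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Disable then delete a table; each call blocks until the master-side procedure completes.
public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);   // DisableTableProcedure (in this run: pid=93)
        }
        admin.deleteTable(tn);      // DeleteTableProcedure (in this run: pid=97)
      }
    }
  }
}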
2024-12-13T21:31:53,051 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T21:31:53,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:53,052 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T21:31:53,052 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:53,052 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 98 2024-12-13T21:31:53,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-13T21:31:53,053 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T21:31:53,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742168_1344 (size=963) 2024-12-13T21:31:53,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-13T21:31:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-13T21:31:53,460 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05 2024-12-13T21:31:53,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742169_1345 (size=53) 2024-12-13T21:31:53,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-13T21:31:53,870 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:31:53,871 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing be31d870e3b01c14f0b712223355e104, disabling compactions & flushes 2024-12-13T21:31:53,871 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:53,871 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:53,871 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. after waiting 0 ms 2024-12-13T21:31:53,871 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:53,871 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
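Annotation: the create request at 21:31:53,051 builds the table with the attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three single-version families A, B and C. A hedged sketch of issuing an equivalent create with the 2.x descriptor builders is below; only the attributes visible in the log are set, every other family option is left at its default, and the class name is hypothetical.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Recreates the shape of the CREATE request in the log: ADAPTIVE in-memory compaction,
// families A/B/C with VERSIONS => 1; everything else stays at its default.
public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    TableDescriptorBuilder tdb = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] { "A", "B", "C" }) {
      tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)
          .build());
    }
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(tdb.build());   // CreateTableProcedure (in this run: pid=98)
    }
  }
}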
2024-12-13T21:31:53,871 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:31:53,873 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T21:31:53,874 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734125513873"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734125513873"}]},"ts":"1734125513873"} 2024-12-13T21:31:53,876 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-13T21:31:53,878 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T21:31:53,878 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125513878"}]},"ts":"1734125513878"} 2024-12-13T21:31:53,880 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-13T21:31:53,898 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=be31d870e3b01c14f0b712223355e104, ASSIGN}] 2024-12-13T21:31:53,900 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=be31d870e3b01c14f0b712223355e104, ASSIGN 2024-12-13T21:31:53,901 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=be31d870e3b01c14f0b712223355e104, ASSIGN; state=OFFLINE, location=fd052dae32be,38989,1734125418878; forceNewPlan=false, retain=false 2024-12-13T21:31:54,051 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=be31d870e3b01c14f0b712223355e104, regionState=OPENING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:31:54,053 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; OpenRegionProcedure be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:31:54,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-13T21:31:54,205 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:54,210 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
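Annotation: once the ASSIGN subprocedure above opens the single region on fd052dae32be,38989,1734125418878, a client can confirm where it landed through the RegionLocator. A small, hedged sketch (class name hypothetical, standard 2.x client API):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

// Prints each region's encoded name and hosting region server for the table.
public class RegionLocationsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}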
2024-12-13T21:31:54,210 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7285): Opening region: {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} 2024-12-13T21:31:54,211 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:54,211 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:31:54,211 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7327): checking encryption for be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:54,211 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7330): checking classloading for be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:54,214 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:54,216 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:31:54,216 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be31d870e3b01c14f0b712223355e104 columnFamilyName A 2024-12-13T21:31:54,216 DEBUG [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:54,217 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.HStore(327): Store=be31d870e3b01c14f0b712223355e104/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:31:54,217 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:54,218 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:31:54,218 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be31d870e3b01c14f0b712223355e104 columnFamilyName B 2024-12-13T21:31:54,218 DEBUG [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:54,219 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.HStore(327): Store=be31d870e3b01c14f0b712223355e104/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:31:54,219 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:54,220 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:31:54,220 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be31d870e3b01c14f0b712223355e104 columnFamilyName C 2024-12-13T21:31:54,220 DEBUG [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:54,220 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.HStore(327): Store=be31d870e3b01c14f0b712223355e104/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:31:54,220 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:54,221 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:54,221 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:54,223 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-13T21:31:54,224 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1085): writing seq id for be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:54,226 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T21:31:54,226 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1102): Opened be31d870e3b01c14f0b712223355e104; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70646271, jitterRate=0.052711471915245056}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T21:31:54,227 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1001): Region open journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:31:54,227 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., pid=100, masterSystemTime=1734125514204 2024-12-13T21:31:54,228 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:54,228 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
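Annotation: the store openers above report memstore type=CompactingMemStore with compactor=ADAPTIVE and a 2.00 MB in-memory flush threshold per family, and FlushLargeStoresPolicy falls back to memstore-flush-size divided by the number of families because no 'hbase.hregion.percolumnfamilyflush.size.lower.bound' is set in the table descriptor. Below is a hedged sketch of stating both knobs explicitly at the descriptor level; the property name comes from the log itself, the 1 MB value and class name are arbitrary examples.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Builds a descriptor that sets the per-family in-memory compaction policy and the
// per-column-family flush lower bound mentioned by FlushLargeStoresPolicy in the log.
public class MemstoreTuningSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // table-level property the log reports as unset; 1 MB here is only an example value
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", String.valueOf(1024 * 1024))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE) // per-family form of the ADAPTIVE setting
            .build())
        .build();
    System.out.println(td);
  }
}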
2024-12-13T21:31:54,229 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=be31d870e3b01c14f0b712223355e104, regionState=OPEN, openSeqNum=2, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:31:54,231 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-13T21:31:54,231 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; OpenRegionProcedure be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 in 177 msec 2024-12-13T21:31:54,232 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-13T21:31:54,232 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=be31d870e3b01c14f0b712223355e104, ASSIGN in 333 msec 2024-12-13T21:31:54,233 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T21:31:54,233 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125514233"}]},"ts":"1734125514233"} 2024-12-13T21:31:54,234 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-13T21:31:54,247 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T21:31:54,249 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1960 sec 2024-12-13T21:31:55,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-13T21:31:55,159 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-12-13T21:31:55,163 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1acf826f to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@353bcb3d 2024-12-13T21:31:55,173 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20c5edec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:55,175 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:55,176 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40796, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:55,177 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-13T21:31:55,178 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43084, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-13T21:31:55,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-13T21:31:55,180 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.3 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T21:31:55,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=101, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-13T21:31:55,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742170_1346 (size=999) 2024-12-13T21:31:55,590 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-13T21:31:55,591 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-13T21:31:55,592 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-13T21:31:55,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=be31d870e3b01c14f0b712223355e104, REOPEN/MOVE}] 2024-12-13T21:31:55,594 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=be31d870e3b01c14f0b712223355e104, REOPEN/MOVE 2024-12-13T21:31:55,595 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=be31d870e3b01c14f0b712223355e104, regionState=CLOSING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:31:55,595 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T21:31:55,596 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE; CloseRegionProcedure be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:31:55,747 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:55,749 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(124): Close be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:55,749 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T21:31:55,749 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1681): Closing be31d870e3b01c14f0b712223355e104, disabling compactions & flushes 2024-12-13T21:31:55,749 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:55,749 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:55,749 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. after waiting 0 ms 2024-12-13T21:31:55,749 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
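Annotation: the modify request at 21:31:55,179 changes family A to IS_MOB => 'true' with MOB_THRESHOLD => '4', which is what the close and reopen of be31d870e3b01c14f0b712223355e104 above and below carry out. A hedged sketch of issuing an equivalent modification with the descriptor builders, copying the existing descriptor and touching only family A; the class name is hypothetical.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Turns family A into a MOB family with a 4-byte threshold, leaving B and C untouched.
public class EnableMobSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor current = admin.getDescriptor(tn);
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
              .setMobEnabled(true)     // IS_MOB => 'true'
              .setMobThreshold(4L)     // MOB_THRESHOLD => '4' (bytes)
              .build())
          .build();
      admin.modifyTable(modified);     // ModifyTableProcedure (in this run: pid=101)
    }
  }
}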
2024-12-13T21:31:55,758 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-13T21:31:55,758 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:55,758 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1635): Region close journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:31:55,759 WARN [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegionServer(3786): Not adding moved region record: be31d870e3b01c14f0b712223355e104 to self. 2024-12-13T21:31:55,760 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(170): Closed be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:55,761 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=be31d870e3b01c14f0b712223355e104, regionState=CLOSED 2024-12-13T21:31:55,764 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-12-13T21:31:55,764 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; CloseRegionProcedure be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 in 166 msec 2024-12-13T21:31:55,764 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=be31d870e3b01c14f0b712223355e104, REOPEN/MOVE; state=CLOSED, location=fd052dae32be,38989,1734125418878; forceNewPlan=false, retain=true 2024-12-13T21:31:55,915 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=be31d870e3b01c14f0b712223355e104, regionState=OPENING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:31:55,916 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=103, state=RUNNABLE; OpenRegionProcedure be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:31:56,069 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,076 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
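Annotation: the test thread keeps logging "Checking to see if procedure is done pid=101" until the ModifyTableProcedure and the ReopenTableRegionsProcedure it spawned finish. With the asynchronous Admin call that wait becomes an explicit Future; a hedged sketch follows (class name hypothetical, and in a real change the descriptor would be rebuilt first, for example as in the MOB sketch above, rather than resubmitted unchanged).

import java.util.concurrent.Future;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

// Submits a table modification asynchronously and blocks on the returned Future, which
// resolves once the master-side procedure (including the region reopen) has completed --
// the same completion the "procedure is done" polling above is tracking.
public class WaitForModifySketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = admin.getDescriptor(tn);  // placeholder: rebuild this with the intended change
      Future<Void> done = admin.modifyTableAsync(td);
      done.get();                                    // returns when the procedure finishes
    }
  }
}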
2024-12-13T21:31:56,076 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7285): Opening region: {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} 2024-12-13T21:31:56,077 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:56,077 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:31:56,077 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7327): checking encryption for be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:56,077 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7330): checking classloading for be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:56,079 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:56,081 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:31:56,081 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be31d870e3b01c14f0b712223355e104 columnFamilyName A 2024-12-13T21:31:56,083 DEBUG [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:56,083 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.HStore(327): Store=be31d870e3b01c14f0b712223355e104/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:31:56,084 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:56,084 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:31:56,084 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be31d870e3b01c14f0b712223355e104 columnFamilyName B 2024-12-13T21:31:56,084 DEBUG [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:56,085 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.HStore(327): Store=be31d870e3b01c14f0b712223355e104/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:31:56,085 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:56,086 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:31:56,086 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be31d870e3b01c14f0b712223355e104 columnFamilyName C 2024-12-13T21:31:56,086 DEBUG [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:56,086 INFO [StoreOpener-be31d870e3b01c14f0b712223355e104-1 {}] regionserver.HStore(327): Store=be31d870e3b01c14f0b712223355e104/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:31:56,086 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:56,087 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:56,088 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:56,089 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-13T21:31:56,090 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1085): writing seq id for be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:56,091 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1102): Opened be31d870e3b01c14f0b712223355e104; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61007157, jitterRate=-0.0909225195646286}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T21:31:56,092 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1001): Region open journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:31:56,092 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., pid=105, masterSystemTime=1734125516069 2024-12-13T21:31:56,093 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:56,093 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
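Annotation: with family A now MOB-enabled, values larger than the 4-byte threshold go to MOB files, but reads are unchanged; the upcoming testMobScanAtomicity scans the table with the regular client API and MOB-backed cells come back like any other cell. A minimal, hedged scan sketch (class name hypothetical):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Scans families A, B and C; MOB-backed cells in A are resolved transparently by the region server.
public class MobScanSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Scan scan = new Scan()
          .addFamily(Bytes.toBytes("A"))
          .addFamily(Bytes.toBytes("B"))
          .addFamily(Bytes.toBytes("C"));
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result row : scanner) {
          System.out.println(Bytes.toStringBinary(row.getRow()) + " has " + row.size() + " cells");
        }
      }
    }
  }
}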
2024-12-13T21:31:56,094 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=be31d870e3b01c14f0b712223355e104, regionState=OPEN, openSeqNum=5, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,096 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=103 2024-12-13T21:31:56,096 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=103, state=SUCCESS; OpenRegionProcedure be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 in 178 msec 2024-12-13T21:31:56,098 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-13T21:31:56,098 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=be31d870e3b01c14f0b712223355e104, REOPEN/MOVE in 502 msec 2024-12-13T21:31:56,100 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-13T21:31:56,100 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 507 msec 2024-12-13T21:31:56,103 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 921 msec 2024-12-13T21:31:56,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-12-13T21:31:56,106 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1cbce2b4 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@77b5b03d 2024-12-13T21:31:56,178 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@789089aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:56,180 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4fd3f5fc to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15bd9063 2024-12-13T21:31:56,196 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@699c96a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:56,197 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05c97513 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c0ec341 2024-12-13T21:31:56,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@712a5bc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:56,208 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x42af2962 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4831febd 2024-12-13T21:31:56,215 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b660061, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:56,216 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5910b8c7 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e93614e 2024-12-13T21:31:56,223 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45ad0ff5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:56,224 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0523025d to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@28dc77ab 2024-12-13T21:31:56,232 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a259e93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:56,233 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b9a1701 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@70304ef6 2024-12-13T21:31:56,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6179765, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:56,241 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d3b05cf to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@f8ea360 2024-12-13T21:31:56,249 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3518b14b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:56,250 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x170d29d0 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6de21df2 2024-12-13T21:31:56,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d806bec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:56,259 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b61f1c4 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@461176d7 2024-12-13T21:31:56,272 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5850aaf2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:31:56,276 DEBUG [hconnection-0x40424da4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:56,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:56,278 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40812, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:56,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-13T21:31:56,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-13T21:31:56,279 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:56,279 DEBUG [hconnection-0x70e40eed-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:56,279 DEBUG [hconnection-0x742f0a62-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:56,279 DEBUG [hconnection-0x164e1471-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:56,280 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:56,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:56,280 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40824, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:56,280 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40822, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:56,280 DEBUG [hconnection-0x1900e2d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:56,280 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40830, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:56,281 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40844, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:56,283 DEBUG [hconnection-0x4d1a23b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:56,283 DEBUG [hconnection-0x111403b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:56,283 DEBUG [hconnection-0xcd2da43-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:56,284 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40852, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:56,284 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40854, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:56,285 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40876, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:56,286 DEBUG [hconnection-0x26cd340e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:56,287 DEBUG [hconnection-0x35cab96f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:31:56,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:56,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-13T21:31:56,288 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40892, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:56,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:31:56,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:56,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:31:56,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:56,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:31:56,288 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40902, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:31:56,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:56,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125576300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125576300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125576300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125576301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125576301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121314f67d7fcdd9421181317617034b5a94_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125516285/Put/seqid=0 2024-12-13T21:31:56,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742171_1347 (size=14594) 2024-12-13T21:31:56,330 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:56,334 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121314f67d7fcdd9421181317617034b5a94_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121314f67d7fcdd9421181317617034b5a94_be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:56,337 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/bcffbeac7892468bbb7884db9c37f5d4, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:31:56,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/bcffbeac7892468bbb7884db9c37f5d4 is 175, key is test_row_0/A:col10/1734125516285/Put/seqid=0 2024-12-13T21:31:56,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742172_1348 (size=39549) 2024-12-13T21:31:56,349 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/bcffbeac7892468bbb7884db9c37f5d4 2024-12-13T21:31:56,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/9e1cc48b12c74995a67ec9925c8ee179 is 50, key is test_row_0/B:col10/1734125516285/Put/seqid=0 2024-12-13T21:31:56,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-13T21:31:56,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742173_1349 (size=12001) 2024-12-13T21:31:56,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125576402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125576402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125576402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125576402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125576402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,431 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-13T21:31:56,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:56,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:31:56,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:56,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
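The "Region is too busy" warnings above are RegionTooBusyException responses returned to the writer threads once the region's memstore passes its blocking limit (reported here as 512.0 K) while the flush started at 21:31:56,287 is still draining it. With default settings the HBase client normally absorbs this exception inside its own retry machinery, but a caller that does see it directly can back off and retry, along the lines of the sketch below; row, family and qualifier are copied from the log, while the payload, retry count and backoff are assumptions:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnTooBusy {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));              // row key as seen in the log
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"),    // family/qualifier as in the log
          Bytes.toBytes("value"));                                 // payload is hypothetical
      long backoffMs = 100;                                        // backoff policy is an assumption
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;  // write accepted
        } catch (RegionTooBusyException e) {
          // The memstore is above its blocking limit; wait for the flush to drain it, then retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```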
2024-12-13T21:31:56,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:56,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:56,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-13T21:31:56,584 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-13T21:31:56,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:56,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:31:56,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:56,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:56,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:56,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:56,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125576603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125576603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125576604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:56,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125576604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:56,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:56,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125576604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:56,737 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878
2024-12-13T21:31:56,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-12-13T21:31:56,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.
2024-12-13T21:31:56,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing
2024-12-13T21:31:56,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.
2024-12-13T21:31:56,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107
java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T21:31:56,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107
java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T21:31:56,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=107
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T21:31:56,789 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/9e1cc48b12c74995a67ec9925c8ee179
2024-12-13T21:31:56,806 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a468ba7e5d2a46519cd749fb55dc38d0 is 50, key is test_row_0/C:col10/1734125516285/Put/seqid=0
2024-12-13T21:31:56,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742174_1350 (size=12001)
2024-12-13T21:31:56,815 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a468ba7e5d2a46519cd749fb55dc38d0
2024-12-13T21:31:56,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/bcffbeac7892468bbb7884db9c37f5d4 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bcffbeac7892468bbb7884db9c37f5d4
2024-12-13T21:31:56,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bcffbeac7892468bbb7884db9c37f5d4, entries=200, sequenceid=17, filesize=38.6 K
2024-12-13T21:31:56,831 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/9e1cc48b12c74995a67ec9925c8ee179 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/9e1cc48b12c74995a67ec9925c8ee179
2024-12-13T21:31:56,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/9e1cc48b12c74995a67ec9925c8ee179, entries=150, sequenceid=17, filesize=11.7 K
2024-12-13T21:31:56,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a468ba7e5d2a46519cd749fb55dc38d0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a468ba7e5d2a46519cd749fb55dc38d0
2024-12-13T21:31:56,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a468ba7e5d2a46519cd749fb55dc38d0, entries=150, sequenceid=17, filesize=11.7 K
2024-12-13T21:31:56,840 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for be31d870e3b01c14f0b712223355e104 in 553ms, sequenceid=17, compaction requested=false
2024-12-13T21:31:56,840 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees'
2024-12-13T21:31:56,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104:
2024-12-13T21:31:56,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-12-13T21:31:56,889 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878
2024-12-13T21:31:56,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-12-13T21:31:56,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.
2024-12-13T21:31:56,890 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-12-13T21:31:56,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A
2024-12-13T21:31:56,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:31:56,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B
2024-12-13T21:31:56,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:31:56,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C
2024-12-13T21:31:56,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:31:56,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213499076b1e46843f78b21a3b13bc52708_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125516299/Put/seqid=0
2024-12-13T21:31:56,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104
2024-12-13T21:31:56,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing
2024-12-13T21:31:56,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742175_1351 (size=12154)
2024-12-13T21:31:56,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:31:56,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:56,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125576916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:56,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:56,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125576921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:56,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:56,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125576923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:56,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:56,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125576925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:56,929 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213499076b1e46843f78b21a3b13bc52708_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213499076b1e46843f78b21a3b13bc52708_be31d870e3b01c14f0b712223355e104
2024-12-13T21:31:56,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:56,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125576926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:56,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/87ccb5d61f0e454e98c3c9c4ddd6b50e, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104]
2024-12-13T21:31:56,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/87ccb5d61f0e454e98c3c9c4ddd6b50e is 175, key is test_row_0/A:col10/1734125516299/Put/seqid=0
2024-12-13T21:31:56,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742176_1352 (size=30955)
2024-12-13T21:31:56,959 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/87ccb5d61f0e454e98c3c9c4ddd6b50e
2024-12-13T21:31:56,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/3f2df5caf1de4eedb5f910906c330298 is 50, key is test_row_0/B:col10/1734125516299/Put/seqid=0
2024-12-13T21:31:56,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742177_1353 (size=12001)
2024-12-13T21:31:56,980 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/3f2df5caf1de4eedb5f910906c330298
2024-12-13T21:31:56,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/90a6fedc5f2b4f56927d6d16d2a38d7e is 50, key is test_row_0/C:col10/1734125516299/Put/seqid=0
2024-12-13T21:31:57,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742178_1354 (size=12001)
2024-12-13T21:31:57,018 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/90a6fedc5f2b4f56927d6d16d2a38d7e
2024-12-13T21:31:57,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/87ccb5d61f0e454e98c3c9c4ddd6b50e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/87ccb5d61f0e454e98c3c9c4ddd6b50e
2024-12-13T21:31:57,027 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/87ccb5d61f0e454e98c3c9c4ddd6b50e, entries=150, sequenceid=41, filesize=30.2 K
2024-12-13T21:31:57,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/3f2df5caf1de4eedb5f910906c330298 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/3f2df5caf1de4eedb5f910906c330298
2024-12-13T21:31:57,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:57,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125577028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:57,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:57,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125577028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:57,033 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/3f2df5caf1de4eedb5f910906c330298, entries=150, sequenceid=41, filesize=11.7 K
2024-12-13T21:31:57,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:57,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125577029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:57,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:57,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/90a6fedc5f2b4f56927d6d16d2a38d7e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/90a6fedc5f2b4f56927d6d16d2a38d7e
2024-12-13T21:31:57,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125577030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:57,039 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/90a6fedc5f2b4f56927d6d16d2a38d7e, entries=150, sequenceid=41, filesize=11.7 K
2024-12-13T21:31:57,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:57,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125577035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:57,040 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for be31d870e3b01c14f0b712223355e104 in 149ms, sequenceid=41, compaction requested=false
2024-12-13T21:31:57,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104:
2024-12-13T21:31:57,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.
2024-12-13T21:31:57,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107
2024-12-13T21:31:57,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=107
2024-12-13T21:31:57,042 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106
2024-12-13T21:31:57,042 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 761 msec
2024-12-13T21:31:57,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 765 msec
2024-12-13T21:31:57,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104
2024-12-13T21:31:57,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-12-13T21:31:57,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A
2024-12-13T21:31:57,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:31:57,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B
2024-12-13T21:31:57,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:31:57,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C
2024-12-13T21:31:57,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:31:57,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121332ca8e8341d746fb84ad9cc92660ab36_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125516925/Put/seqid=0
2024-12-13T21:31:57,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742179_1355 (size=14594)
2024-12-13T21:31:57,261 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:31:57,264 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121332ca8e8341d746fb84ad9cc92660ab36_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121332ca8e8341d746fb84ad9cc92660ab36_be31d870e3b01c14f0b712223355e104
2024-12-13T21:31:57,265 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/3b08a57e1c62448dadd3aa9fe8baa183, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104]
2024-12-13T21:31:57,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/3b08a57e1c62448dadd3aa9fe8baa183 is 175, key is test_row_0/A:col10/1734125516925/Put/seqid=0
2024-12-13T21:31:57,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:57,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125577261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:57,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:57,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125577261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:57,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:57,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125577262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:57,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:57,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125577268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:57,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:57,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125577269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:57,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742180_1356 (size=39549)
2024-12-13T21:31:57,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:57,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125577370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:57,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:31:57,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125577370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:31:57,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:57,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125577370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-13T21:31:57,382 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-13T21:31:57,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:57,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125577377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,383 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:57,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:57,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125577378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-13T21:31:57,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-13T21:31:57,384 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:57,385 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:57,385 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:57,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-13T21:31:57,536 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-13T21:31:57,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:57,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:31:57,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:31:57,537 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:57,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:57,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:57,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:57,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125577576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:57,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125577577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:57,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125577577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:57,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125577584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:57,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125577585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-13T21:31:57,688 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,689 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-13T21:31:57,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:57,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:31:57,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:57,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:57,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:57,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:31:57,701 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/3b08a57e1c62448dadd3aa9fe8baa183 2024-12-13T21:31:57,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/add45aebfd4d4d9fb8463811454cc661 is 50, key is test_row_0/B:col10/1734125516925/Put/seqid=0 2024-12-13T21:31:57,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742181_1357 (size=12001) 2024-12-13T21:31:57,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/add45aebfd4d4d9fb8463811454cc661 2024-12-13T21:31:57,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/b208797d89484f97beb2d327932f3f4a is 50, key is test_row_0/C:col10/1734125516925/Put/seqid=0 2024-12-13T21:31:57,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742182_1358 (size=12001) 2024-12-13T21:31:57,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/b208797d89484f97beb2d327932f3f4a 2024-12-13T21:31:57,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/3b08a57e1c62448dadd3aa9fe8baa183 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/3b08a57e1c62448dadd3aa9fe8baa183 2024-12-13T21:31:57,736 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/3b08a57e1c62448dadd3aa9fe8baa183, entries=200, sequenceid=54, filesize=38.6 K 2024-12-13T21:31:57,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/add45aebfd4d4d9fb8463811454cc661 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/add45aebfd4d4d9fb8463811454cc661 2024-12-13T21:31:57,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/add45aebfd4d4d9fb8463811454cc661, entries=150, sequenceid=54, filesize=11.7 K 2024-12-13T21:31:57,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/b208797d89484f97beb2d327932f3f4a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/b208797d89484f97beb2d327932f3f4a 2024-12-13T21:31:57,749 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/b208797d89484f97beb2d327932f3f4a, entries=150, sequenceid=54, filesize=11.7 K 2024-12-13T21:31:57,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for be31d870e3b01c14f0b712223355e104 in 515ms, sequenceid=54, compaction requested=true 2024-12-13T21:31:57,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:31:57,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:31:57,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:57,751 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:57,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:31:57,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:57,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:31:57,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-13T21:31:57,751 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:57,753 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:57,753 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110053 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:57,753 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/B is initiating minor compaction (all files) 2024-12-13T21:31:57,753 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/A is initiating minor compaction (all files) 2024-12-13T21:31:57,753 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/B in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:57,753 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/A in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:31:57,753 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/9e1cc48b12c74995a67ec9925c8ee179, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/3f2df5caf1de4eedb5f910906c330298, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/add45aebfd4d4d9fb8463811454cc661] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=35.2 K 2024-12-13T21:31:57,753 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bcffbeac7892468bbb7884db9c37f5d4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/87ccb5d61f0e454e98c3c9c4ddd6b50e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/3b08a57e1c62448dadd3aa9fe8baa183] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=107.5 K 2024-12-13T21:31:57,753 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:57,753 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bcffbeac7892468bbb7884db9c37f5d4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/87ccb5d61f0e454e98c3c9c4ddd6b50e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/3b08a57e1c62448dadd3aa9fe8baa183] 2024-12-13T21:31:57,753 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e1cc48b12c74995a67ec9925c8ee179, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734125516285 2024-12-13T21:31:57,753 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcffbeac7892468bbb7884db9c37f5d4, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734125516283 2024-12-13T21:31:57,754 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f2df5caf1de4eedb5f910906c330298, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734125516299 2024-12-13T21:31:57,754 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87ccb5d61f0e454e98c3c9c4ddd6b50e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734125516299 2024-12-13T21:31:57,754 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting add45aebfd4d4d9fb8463811454cc661, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734125516921 2024-12-13T21:31:57,754 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b08a57e1c62448dadd3aa9fe8baa183, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734125516912 2024-12-13T21:31:57,762 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#B#compaction#302 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:57,762 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/20b0c3f22aed4050b530eb3e99209eca is 50, key is test_row_0/B:col10/1734125516925/Put/seqid=0 2024-12-13T21:31:57,765 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:31:57,770 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412136193cbaa7e6e4365a3bb1b69beaf91b9_be31d870e3b01c14f0b712223355e104 store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:31:57,773 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412136193cbaa7e6e4365a3bb1b69beaf91b9_be31d870e3b01c14f0b712223355e104, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:31:57,773 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412136193cbaa7e6e4365a3bb1b69beaf91b9_be31d870e3b01c14f0b712223355e104 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:31:57,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742184_1360 (size=4469) 2024-12-13T21:31:57,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742183_1359 (size=12104) 2024-12-13T21:31:57,841 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-13T21:31:57,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:31:57,841 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-13T21:31:57,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:31:57,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:57,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:31:57,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:57,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:31:57,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:57,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213f591f3dea6d9446499bda216a947e272_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125517267/Put/seqid=0 2024-12-13T21:31:57,863 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-13T21:31:57,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742185_1361 (size=12154) 2024-12-13T21:31:57,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:57,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:31:57,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:57,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125577902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:57,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125577903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:57,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125577910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:57,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125577911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:57,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125577911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:57,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-13T21:31:58,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125578012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125578013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125578017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125578017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125578017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,190 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#A#compaction#303 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:58,191 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/b68a2e43e4f444a6a44fd6bc9ab37707 is 175, key is test_row_0/A:col10/1734125516925/Put/seqid=0 2024-12-13T21:31:58,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742186_1362 (size=31058) 2024-12-13T21:31:58,195 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/20b0c3f22aed4050b530eb3e99209eca as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/20b0c3f22aed4050b530eb3e99209eca 2024-12-13T21:31:58,199 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/B of be31d870e3b01c14f0b712223355e104 into 20b0c3f22aed4050b530eb3e99209eca(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:31:58,199 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:31:58,199 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/B, priority=13, startTime=1734125517751; duration=0sec 2024-12-13T21:31:58,199 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:31:58,199 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:B 2024-12-13T21:31:58,200 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:31:58,200 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:31:58,200 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/C is initiating minor compaction (all files) 2024-12-13T21:31:58,200 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/C in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:31:58,200 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a468ba7e5d2a46519cd749fb55dc38d0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/90a6fedc5f2b4f56927d6d16d2a38d7e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/b208797d89484f97beb2d327932f3f4a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=35.2 K 2024-12-13T21:31:58,201 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a468ba7e5d2a46519cd749fb55dc38d0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734125516285 2024-12-13T21:31:58,201 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 90a6fedc5f2b4f56927d6d16d2a38d7e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734125516299 2024-12-13T21:31:58,201 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting b208797d89484f97beb2d327932f3f4a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734125516921 2024-12-13T21:31:58,206 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
be31d870e3b01c14f0b712223355e104#C#compaction#305 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:31:58,206 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/6deb2140bcc9436eb2f81ed576163611 is 50, key is test_row_0/C:col10/1734125516925/Put/seqid=0 2024-12-13T21:31:58,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742187_1363 (size=12104) 2024-12-13T21:31:58,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125578216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125578217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125578222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125578222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125578223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:58,271 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213f591f3dea6d9446499bda216a947e272_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213f591f3dea6d9446499bda216a947e272_be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:58,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/fe8bffef6e544a51b7f1c230485767b6, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:31:58,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/fe8bffef6e544a51b7f1c230485767b6 is 175, key is test_row_0/A:col10/1734125517267/Put/seqid=0 2024-12-13T21:31:58,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742188_1364 (size=30955) 2024-12-13T21:31:58,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-13T21:31:58,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125578522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125578522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125578528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125578529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:58,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125578529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:58,598 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/b68a2e43e4f444a6a44fd6bc9ab37707 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/b68a2e43e4f444a6a44fd6bc9ab37707 2024-12-13T21:31:58,602 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/A of be31d870e3b01c14f0b712223355e104 into b68a2e43e4f444a6a44fd6bc9ab37707(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:58,603 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:31:58,603 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/A, priority=13, startTime=1734125517751; duration=0sec 2024-12-13T21:31:58,603 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:58,603 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:A 2024-12-13T21:31:58,613 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/6deb2140bcc9436eb2f81ed576163611 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/6deb2140bcc9436eb2f81ed576163611 2024-12-13T21:31:58,617 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/C of be31d870e3b01c14f0b712223355e104 into 6deb2140bcc9436eb2f81ed576163611(size=11.8 K), total size for store is 11.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:31:58,617 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:31:58,617 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/C, priority=13, startTime=1734125517751; duration=0sec 2024-12-13T21:31:58,617 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:31:58,617 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:C 2024-12-13T21:31:58,676 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/fe8bffef6e544a51b7f1c230485767b6 2024-12-13T21:31:58,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/5eebcddf3ad14f7389d9a4f9a886051c is 50, key is test_row_0/B:col10/1734125517267/Put/seqid=0 2024-12-13T21:31:58,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742189_1365 (size=12001) 2024-12-13T21:31:59,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:59,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125579027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:59,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:59,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125579027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:59,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:59,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125579033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:59,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125579033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:59,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:31:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125579034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:31:59,086 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/5eebcddf3ad14f7389d9a4f9a886051c 2024-12-13T21:31:59,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/5a6a3b3437754b719c3a8a4d93539a21 is 50, key is test_row_0/C:col10/1734125517267/Put/seqid=0 2024-12-13T21:31:59,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742190_1366 (size=12001) 2024-12-13T21:31:59,126 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/5a6a3b3437754b719c3a8a4d93539a21 2024-12-13T21:31:59,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/fe8bffef6e544a51b7f1c230485767b6 as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fe8bffef6e544a51b7f1c230485767b6 2024-12-13T21:31:59,135 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fe8bffef6e544a51b7f1c230485767b6, entries=150, sequenceid=77, filesize=30.2 K 2024-12-13T21:31:59,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/5eebcddf3ad14f7389d9a4f9a886051c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/5eebcddf3ad14f7389d9a4f9a886051c 2024-12-13T21:31:59,140 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/5eebcddf3ad14f7389d9a4f9a886051c, entries=150, sequenceid=77, filesize=11.7 K 2024-12-13T21:31:59,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/5a6a3b3437754b719c3a8a4d93539a21 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5a6a3b3437754b719c3a8a4d93539a21 2024-12-13T21:31:59,145 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5a6a3b3437754b719c3a8a4d93539a21, entries=150, sequenceid=77, filesize=11.7 K 2024-12-13T21:31:59,145 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for be31d870e3b01c14f0b712223355e104 in 1304ms, sequenceid=77, compaction requested=false 2024-12-13T21:31:59,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:31:59,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:31:59,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-13T21:31:59,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-13T21:31:59,149 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-13T21:31:59,149 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7620 sec 2024-12-13T21:31:59,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.7660 sec 2024-12-13T21:31:59,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-13T21:31:59,488 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-13T21:31:59,490 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:31:59,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-13T21:31:59,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-13T21:31:59,491 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:31:59,492 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:31:59,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:31:59,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-13T21:31:59,644 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:31:59,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-13T21:31:59,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:31:59,644 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-13T21:31:59,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:31:59,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:59,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:31:59,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:59,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:31:59,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:31:59,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412133b3086c6a1ea43fc84b457a72f86a706_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125517902/Put/seqid=0 2024-12-13T21:31:59,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742191_1367 (size=12154) 2024-12-13T21:31:59,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,675 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412133b3086c6a1ea43fc84b457a72f86a706_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412133b3086c6a1ea43fc84b457a72f86a706_be31d870e3b01c14f0b712223355e104 2024-12-13T21:31:59,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/cefae8128b244ccc991b756bedc18d3c, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:31:59,677 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/cefae8128b244ccc991b756bedc18d3c is 175, key is test_row_0/A:col10/1734125517902/Put/seqid=0 2024-12-13T21:31:59,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742192_1368 (size=30955) 2024-12-13T21:31:59,705 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/cefae8128b244ccc991b756bedc18d3c 2024-12-13T21:31:59,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/b6be3758ebd2429faffab1c54854d167 is 50, key is test_row_0/B:col10/1734125517902/Put/seqid=0 2024-12-13T21:31:59,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742193_1369 (size=12001) 2024-12-13T21:31:59,769 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/b6be3758ebd2429faffab1c54854d167 2024-12-13T21:31:59,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/4e803a4b54a341eeb6e40b20a1b47a61 is 50, key is test_row_0/C:col10/1734125517902/Put/seqid=0 2024-12-13T21:31:59,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742194_1370 (size=12001) 2024-12-13T21:31:59,785 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/4e803a4b54a341eeb6e40b20a1b47a61 2024-12-13T21:31:59,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/cefae8128b244ccc991b756bedc18d3c as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/cefae8128b244ccc991b756bedc18d3c 2024-12-13T21:31:59,792 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/cefae8128b244ccc991b756bedc18d3c, entries=150, sequenceid=93, filesize=30.2 K 2024-12-13T21:31:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-13T21:31:59,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/b6be3758ebd2429faffab1c54854d167 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/b6be3758ebd2429faffab1c54854d167 2024-12-13T21:31:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,796 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/b6be3758ebd2429faffab1c54854d167, entries=150, sequenceid=93, filesize=11.7 K 2024-12-13T21:31:59,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/4e803a4b54a341eeb6e40b20a1b47a61 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/4e803a4b54a341eeb6e40b20a1b47a61 2024-12-13T21:31:59,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,802 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/4e803a4b54a341eeb6e40b20a1b47a61, entries=150, sequenceid=93, filesize=11.7 K 2024-12-13T21:31:59,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,803 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for be31d870e3b01c14f0b712223355e104 in 159ms, sequenceid=93, compaction requested=true 2024-12-13T21:31:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:31:59,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
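The flush for pid=111 finishes with "compaction requested=true", since the region now carries several store files per family. A compaction can also be requested explicitly through the same client API; again a generic sketch under the same assumptions as above, not code from this test:

// Generic sketch: explicitly request a compaction of the test table.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Queues a compaction request for every region of the table and returns
      // immediately; use majorCompact(table) to force a full rewrite instead.
      admin.compact(table);
    }
  }
}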
2024-12-13T21:31:59,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-13T21:31:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-13T21:31:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-13T21:31:59,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 312 msec 2024-12-13T21:31:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,808 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 317 msec 2024-12-13T21:31:59,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,809 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,840 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,843 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,846 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,849 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,854 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,857 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,860 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,862 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,865 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,868 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,872 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,874 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,876 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entry "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeated continuously from 2024-12-13T21:31:59,876 through 2024-12-13T21:31:59,935 by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 38989; duplicate entries collapsed ...]
2024-12-13T21:31:59,935 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,939 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,942 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,945 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,947 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,949 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,951 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,953 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,956 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,959 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,961 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,964 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,966 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,969 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,971 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,973 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,975 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,978 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,981 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,985 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,988 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,991 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,993 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,996 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:31:59,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,000 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,002 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,008 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,012 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,016 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,020 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,022 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-13T21:32:00,093 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-13T21:32:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,095 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:32:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-13T21:32:00,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,096 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:32:00,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-13T21:32:00,097 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:32:00,097 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:32:00,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-13T21:32:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104
2024-12-13T21:32:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112
2024-12-13T21:32:00,199 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-13T21:32:00,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A
2024-12-13T21:32:00,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:32:00,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B
2024-12-13T21:32:00,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:32:00,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C
2024-12-13T21:32:00,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:32:00,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213a0f52d7540c8474fbd143e7f1ce3ece2_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125520198/Put/seqid=0
2024-12-13T21:32:00,250 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878
2024-12-13T21:32:00,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113
2024-12-13T21:32:00,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.
2024-12-13T21:32:00,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing
2024-12-13T21:32:00,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.
2024-12-13T21:32:00,250 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113
java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T21:32:00,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113
java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T21:32:00,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=113
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T21:32:00,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742195_1371 (size=19474)
2024-12-13T21:32:00,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:32:00,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125580279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:32:00,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125580281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125580281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125580282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125580283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-13T21:32:00,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125580395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125580397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,403 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125580397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,403 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-13T21:32:00,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125580398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:00,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125580398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:00,404 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
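Note on the RegionTooBusyException storm: "Over memstore limit=512.0 K" means writes to region be31d870e3b01c14f0b712223355e104 are rejected in HRegion.checkResources until the in-flight flush drains the memstore. The stock HBase client normally retries this exception internally, so an application usually only sees it once retries are exhausted; the sketch below just makes the same back-off explicit. It is a minimal illustration, not the test's code: the table, family "A" and qualifier "col10" are taken from the rows in this log, while the retry bounds are invented.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BackoffPut {
  // Writes one cell, backing off while the region reports it is over its
  // memstore blocking limit (the RegionTooBusyException logged above).
  static void putWithBackoff(Connection conn, byte[] row, byte[] value) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long sleepMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          Put put = new Put(row);
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
          table.put(put);
          return;
        } catch (RegionTooBusyException busy) {
          Thread.sleep(sleepMs);                  // give the flush time to drain the memstore
          sleepMs = Math.min(sleepMs * 2, 5_000); // exponential backoff, capped
        }
      }
      throw new java.io.IOException("region still over its memstore limit after retries");
    }
  }
}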
2024-12-13T21:32:00,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:00,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:00,556 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-13T21:32:00,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:00,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:00,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:00,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:00,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:00,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:00,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125580602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125580604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125580605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125580605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:32:00,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125580606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878
2024-12-13T21:32:00,665 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:00,668 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213a0f52d7540c8474fbd143e7f1ce3ece2_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213a0f52d7540c8474fbd143e7f1ce3ece2_be31d870e3b01c14f0b712223355e104
2024-12-13T21:32:00,669 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/1df51adabf5743e3b5c7e2ff28ba32ce, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104]
2024-12-13T21:32:00,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/1df51adabf5743e3b5c7e2ff28ba32ce is 175, key is test_row_0/A:col10/1734125520198/Put/seqid=0
2024-12-13T21:32:00,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112
2024-12-13T21:32:00,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742196_1372 (size=56733)
2024-12-13T21:32:00,710 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878
2024-12-13T21:32:00,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113
2024-12-13T21:32:00,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.
2024-12-13T21:32:00,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing
2024-12-13T21:32:00,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.
2024-12-13T21:32:00,711 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113
java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T21:32:00,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113
java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
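Note on the "Over memstore limit=512.0 K" figure: updates are blocked once a region's memstore reaches roughly hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. 512 KB is far below the production default, which is consistent with the test shrinking the flush size to force frequent flushes; the exact settings used by TestAcidGuarantees are not shown in this excerpt, so the values below are only one combination that would yield the logged limit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Blocking starts at flush.size * block.multiplier:
    // 128 KB * 4 = 512 KB, matching "Over memstore limit=512.0 K" above.
    // (Illustrative values; the test's actual settings are not in this log.)
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}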
2024-12-13T21:32:00,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:00,862 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-13T21:32:00,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:00,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
as already flushing 2024-12-13T21:32:00,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:00,863 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:00,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:00,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:00,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125580910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125580910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125580911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125580912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:00,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:00,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125580912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,017 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-13T21:32:01,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:01,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:01,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:01,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:01,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:01,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:01,105 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=105, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/1df51adabf5743e3b5c7e2ff28ba32ce 2024-12-13T21:32:01,121 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/d4f03c70775e4d24b4444347ca48888d is 50, key is test_row_0/B:col10/1734125520198/Put/seqid=0 2024-12-13T21:32:01,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742197_1373 (size=12001) 2024-12-13T21:32:01,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/d4f03c70775e4d24b4444347ca48888d 2024-12-13T21:32:01,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a22f91380ec04f1a9a9996094265633e is 50, key is test_row_0/C:col10/1734125520198/Put/seqid=0 2024-12-13T21:32:01,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742198_1374 (size=12001) 2024-12-13T21:32:01,149 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=105 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a22f91380ec04f1a9a9996094265633e 2024-12-13T21:32:01,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/1df51adabf5743e3b5c7e2ff28ba32ce as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1df51adabf5743e3b5c7e2ff28ba32ce 2024-12-13T21:32:01,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1df51adabf5743e3b5c7e2ff28ba32ce, entries=300, sequenceid=105, filesize=55.4 K 2024-12-13T21:32:01,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/d4f03c70775e4d24b4444347ca48888d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/d4f03c70775e4d24b4444347ca48888d 2024-12-13T21:32:01,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/d4f03c70775e4d24b4444347ca48888d, entries=150, sequenceid=105, filesize=11.7 K 2024-12-13T21:32:01,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a22f91380ec04f1a9a9996094265633e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a22f91380ec04f1a9a9996094265633e 2024-12-13T21:32:01,170 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-13T21:32:01,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:01,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:01,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:01,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:01,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:01,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:01,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a22f91380ec04f1a9a9996094265633e, entries=150, sequenceid=105, filesize=11.7 K 2024-12-13T21:32:01,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for be31d870e3b01c14f0b712223355e104 in 974ms, sequenceid=105, compaction requested=true 2024-12-13T21:32:01,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:01,174 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:32:01,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:32:01,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:01,174 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:32:01,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:32:01,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:01,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:32:01,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:01,176 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:32:01,176 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/B is initiating minor compaction (all files) 2024-12-13T21:32:01,176 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/B in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:01,176 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/20b0c3f22aed4050b530eb3e99209eca, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/5eebcddf3ad14f7389d9a4f9a886051c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/b6be3758ebd2429faffab1c54854d167, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/d4f03c70775e4d24b4444347ca48888d] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=47.0 K 2024-12-13T21:32:01,176 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 149701 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:32:01,176 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/A is initiating minor compaction (all files) 2024-12-13T21:32:01,177 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/A in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:01,177 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/b68a2e43e4f444a6a44fd6bc9ab37707, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fe8bffef6e544a51b7f1c230485767b6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/cefae8128b244ccc991b756bedc18d3c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1df51adabf5743e3b5c7e2ff28ba32ce] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=146.2 K 2024-12-13T21:32:01,177 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:01,177 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/b68a2e43e4f444a6a44fd6bc9ab37707, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fe8bffef6e544a51b7f1c230485767b6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/cefae8128b244ccc991b756bedc18d3c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1df51adabf5743e3b5c7e2ff28ba32ce] 2024-12-13T21:32:01,177 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 20b0c3f22aed4050b530eb3e99209eca, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734125516921 2024-12-13T21:32:01,177 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting b68a2e43e4f444a6a44fd6bc9ab37707, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734125516921 2024-12-13T21:32:01,178 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 5eebcddf3ad14f7389d9a4f9a886051c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734125517258 2024-12-13T21:32:01,178 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe8bffef6e544a51b7f1c230485767b6, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734125517258 2024-12-13T21:32:01,178 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] 
compactions.Compactor(224): Compacting b6be3758ebd2429faffab1c54854d167, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734125517894 2024-12-13T21:32:01,178 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting cefae8128b244ccc991b756bedc18d3c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734125517894 2024-12-13T21:32:01,179 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1df51adabf5743e3b5c7e2ff28ba32ce, keycount=300, bloomtype=ROW, size=55.4 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1734125520118 2024-12-13T21:32:01,179 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting d4f03c70775e4d24b4444347ca48888d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1734125520121 2024-12-13T21:32:01,201 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:01,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-13T21:32:01,206 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#B#compaction#315 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:01,208 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/6a943d36b84345f791dce9cdaab8f058 is 50, key is test_row_0/B:col10/1734125520198/Put/seqid=0 2024-12-13T21:32:01,212 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121362c8ba518d2e441d8b24052fc9775ceb_be31d870e3b01c14f0b712223355e104 store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:01,213 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121362c8ba518d2e441d8b24052fc9775ceb_be31d870e3b01c14f0b712223355e104, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:01,213 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121362c8ba518d2e441d8b24052fc9775ceb_be31d870e3b01c14f0b712223355e104 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:01,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742199_1375 (size=12241) 2024-12-13T21:32:01,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742200_1376 (size=4469) 2024-12-13T21:32:01,323 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-13T21:32:01,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:01,324 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-13T21:32:01,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:01,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:01,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:01,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:01,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:01,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:01,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121301f3a2414c2548fcb4dc7c9f51551d56_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125520281/Put/seqid=0 2024-12-13T21:32:01,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742201_1377 (size=12154) 2024-12-13T21:32:01,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:01,346 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121301f3a2414c2548fcb4dc7c9f51551d56_be31d870e3b01c14f0b712223355e104 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121301f3a2414c2548fcb4dc7c9f51551d56_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:01,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/8d202d7abd8e46bb928948fee54c02a3, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:01,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/8d202d7abd8e46bb928948fee54c02a3 is 175, key is test_row_0/A:col10/1734125520281/Put/seqid=0 2024-12-13T21:32:01,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742202_1378 (size=30955) 2024-12-13T21:32:01,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:01,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:01,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125581429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125581429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125581430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125581430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125581434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125581535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125581539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125581539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125581540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125581540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,632 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/6a943d36b84345f791dce9cdaab8f058 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/6a943d36b84345f791dce9cdaab8f058 2024-12-13T21:32:01,635 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in be31d870e3b01c14f0b712223355e104/B of be31d870e3b01c14f0b712223355e104 into 6a943d36b84345f791dce9cdaab8f058(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:32:01,635 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:01,635 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/B, priority=12, startTime=1734125521174; duration=0sec 2024-12-13T21:32:01,635 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:01,635 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:B 2024-12-13T21:32:01,636 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:32:01,637 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:32:01,637 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/C is initiating minor compaction (all files) 2024-12-13T21:32:01,637 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#A#compaction#314 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:01,637 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/C in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:01,637 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/6deb2140bcc9436eb2f81ed576163611, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5a6a3b3437754b719c3a8a4d93539a21, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/4e803a4b54a341eeb6e40b20a1b47a61, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a22f91380ec04f1a9a9996094265633e] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=47.0 K 2024-12-13T21:32:01,637 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 6deb2140bcc9436eb2f81ed576163611, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734125516921 2024-12-13T21:32:01,637 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/1b286a45307e4df9a07a4bfc98e61dc2 is 175, key is test_row_0/A:col10/1734125520198/Put/seqid=0 2024-12-13T21:32:01,637 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a6a3b3437754b719c3a8a4d93539a21, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734125517258 2024-12-13T21:32:01,638 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e803a4b54a341eeb6e40b20a1b47a61, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734125517894 2024-12-13T21:32:01,638 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a22f91380ec04f1a9a9996094265633e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1734125520121 2024-12-13T21:32:01,646 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#C#compaction#317 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:01,646 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/505706b0d93342ae87bc42e8a817198b is 50, key is test_row_0/C:col10/1734125520198/Put/seqid=0 2024-12-13T21:32:01,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742203_1379 (size=31195) 2024-12-13T21:32:01,651 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/1b286a45307e4df9a07a4bfc98e61dc2 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1b286a45307e4df9a07a4bfc98e61dc2 2024-12-13T21:32:01,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742204_1380 (size=12241) 2024-12-13T21:32:01,656 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in be31d870e3b01c14f0b712223355e104/A of be31d870e3b01c14f0b712223355e104 into 1b286a45307e4df9a07a4bfc98e61dc2(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:32:01,656 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:01,656 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/A, priority=12, startTime=1734125521174; duration=0sec 2024-12-13T21:32:01,656 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:01,656 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:A 2024-12-13T21:32:01,657 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/505706b0d93342ae87bc42e8a817198b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/505706b0d93342ae87bc42e8a817198b 2024-12-13T21:32:01,660 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in be31d870e3b01c14f0b712223355e104/C of be31d870e3b01c14f0b712223355e104 into 505706b0d93342ae87bc42e8a817198b(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
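The figures in these records ("4 eligible, 16 blocking", the 512.0 K memstore limit) are driven by a handful of store and memstore settings. The sketch below lists the relevant configuration keys with illustrative values: the 16 matches the log, while the flush size and block multiplier are assumptions chosen only so that their product equals the 512 K blocking limit seen in the RegionTooBusyException; the test's actual settings are not visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StorePressureTuning {
  // Illustrative settings only; not the values used by this test run.
  public static Configuration example() {
    Configuration conf = HBaseConfiguration.create();
    // Store-file counts that drive compaction selection and the "blocking" figure.
    conf.setInt("hbase.hstore.compaction.min", 3);       // smallest candidate set for a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);      // largest candidate set
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // the "16 blocking" reported above
    // Memstore sizing; the write-blocking threshold in the RegionTooBusyException is
    // derived from flush size * block multiplier (assumed here: 128 KB * 4 = 512 KB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}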
2024-12-13T21:32:01,660 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:01,660 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/C, priority=12, startTime=1734125521175; duration=0sec 2024-12-13T21:32:01,660 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:01,660 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:C 2024-12-13T21:32:01,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125581739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125581745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125581745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:01,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125581746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125581746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:01,774 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=129, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/8d202d7abd8e46bb928948fee54c02a3 2024-12-13T21:32:01,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/60cbf8b892d14301a006cf6ca9dd97d7 is 50, key is test_row_0/B:col10/1734125520281/Put/seqid=0 2024-12-13T21:32:01,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742205_1381 (size=12001) 2024-12-13T21:32:01,793 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/60cbf8b892d14301a006cf6ca9dd97d7 2024-12-13T21:32:01,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/b7b0292b6f874c1096033298e998a2fc is 50, key is test_row_0/C:col10/1734125520281/Put/seqid=0 2024-12-13T21:32:01,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742206_1382 (size=12001) 2024-12-13T21:32:01,803 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/b7b0292b6f874c1096033298e998a2fc 2024-12-13T21:32:01,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/8d202d7abd8e46bb928948fee54c02a3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d202d7abd8e46bb928948fee54c02a3 2024-12-13T21:32:01,810 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d202d7abd8e46bb928948fee54c02a3, entries=150, sequenceid=129, filesize=30.2 K 2024-12-13T21:32:01,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/60cbf8b892d14301a006cf6ca9dd97d7 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/60cbf8b892d14301a006cf6ca9dd97d7 2024-12-13T21:32:01,814 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/60cbf8b892d14301a006cf6ca9dd97d7, entries=150, sequenceid=129, filesize=11.7 K 2024-12-13T21:32:01,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/b7b0292b6f874c1096033298e998a2fc as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/b7b0292b6f874c1096033298e998a2fc 2024-12-13T21:32:01,817 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/b7b0292b6f874c1096033298e998a2fc, entries=150, sequenceid=129, filesize=11.7 K 2024-12-13T21:32:01,818 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for be31d870e3b01c14f0b712223355e104 in 494ms, sequenceid=129, compaction requested=false 2024-12-13T21:32:01,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 
{event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:01,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:01,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-13T21:32:01,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-13T21:32:01,820 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-13T21:32:01,820 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7220 sec 2024-12-13T21:32:01,821 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.7250 sec 2024-12-13T21:32:02,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:02,048 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-13T21:32:02,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:02,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:02,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:02,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:02,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:02,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:02,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213154fa6e8f9ef460dbbcb9efd1726cdcd_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125521433/Put/seqid=0 2024-12-13T21:32:02,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742207_1383 (size=14794) 2024-12-13T21:32:02,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125582096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125582098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125582098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125582099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125582099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125582201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-13T21:32:02,204 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-13T21:32:02,204 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:32:02,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-13T21:32:02,206 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:32:02,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125582204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,206 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:32:02,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125582205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,207 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:32:02,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-13T21:32:02,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125582206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125582207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-13T21:32:02,358 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-13T21:32:02,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:02,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:02,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:02,358 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:02,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:02,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:02,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125582404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125582408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125582408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125582412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125582412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,457 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:02,461 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213154fa6e8f9ef460dbbcb9efd1726cdcd_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213154fa6e8f9ef460dbbcb9efd1726cdcd_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:02,461 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/85b3a6b5427c47e98a801d860f25bf2b, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:02,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/85b3a6b5427c47e98a801d860f25bf2b is 175, key is test_row_0/A:col10/1734125521433/Put/seqid=0 2024-12-13T21:32:02,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742208_1384 (size=39749) 2024-12-13T21:32:02,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=114 2024-12-13T21:32:02,510 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-13T21:32:02,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:02,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:02,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:02,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:02,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:02,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:02,663 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-13T21:32:02,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:02,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
as already flushing 2024-12-13T21:32:02,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:02,663 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:02,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:02,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:02,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125582711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125582715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125582716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125582717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:02,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125582718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-13T21:32:02,815 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-13T21:32:02,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:02,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:02,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:02,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:02,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:02,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:02,869 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=145, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/85b3a6b5427c47e98a801d860f25bf2b 2024-12-13T21:32:02,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/12727f2cb83b47319dc63573277f2cb8 is 50, key is test_row_0/B:col10/1734125521433/Put/seqid=0 2024-12-13T21:32:02,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742209_1385 (size=12151) 2024-12-13T21:32:02,967 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:02,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-13T21:32:02,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:02,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
as already flushing 2024-12-13T21:32:02,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:02,968 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:02,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:02,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:03,120 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:03,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-13T21:32:03,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:03,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:03,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:03,121 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:03,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:03,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:03,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:03,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125583217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:03,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:03,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125583225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:03,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:03,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125583226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:03,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:03,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125583226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:03,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:03,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125583226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:03,273 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:03,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-13T21:32:03,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:03,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:03,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:03,274 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:03,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:03,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:03,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=145 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/12727f2cb83b47319dc63573277f2cb8 2024-12-13T21:32:03,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/903221380b8e40b2888d6359ac72b6cd is 50, key is test_row_0/C:col10/1734125521433/Put/seqid=0 2024-12-13T21:32:03,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-13T21:32:03,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742210_1386 (size=12151) 2024-12-13T21:32:03,313 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=145 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/903221380b8e40b2888d6359ac72b6cd 2024-12-13T21:32:03,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/85b3a6b5427c47e98a801d860f25bf2b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/85b3a6b5427c47e98a801d860f25bf2b 2024-12-13T21:32:03,321 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/85b3a6b5427c47e98a801d860f25bf2b, entries=200, sequenceid=145, filesize=38.8 K 2024-12-13T21:32:03,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/12727f2cb83b47319dc63573277f2cb8 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/12727f2cb83b47319dc63573277f2cb8 2024-12-13T21:32:03,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/12727f2cb83b47319dc63573277f2cb8, entries=150, sequenceid=145, filesize=11.9 K 2024-12-13T21:32:03,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/903221380b8e40b2888d6359ac72b6cd as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/903221380b8e40b2888d6359ac72b6cd 2024-12-13T21:32:03,330 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/903221380b8e40b2888d6359ac72b6cd, entries=150, sequenceid=145, filesize=11.9 K 2024-12-13T21:32:03,330 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for be31d870e3b01c14f0b712223355e104 in 1282ms, sequenceid=145, compaction requested=true 2024-12-13T21:32:03,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:03,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:32:03,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:03,330 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:03,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:32:03,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:03,331 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-12-13T21:32:03,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:32:03,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:03,331 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:03,331 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101899 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:03,331 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/B is initiating minor compaction (all files) 2024-12-13T21:32:03,331 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/A is initiating minor compaction (all files) 2024-12-13T21:32:03,331 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/B in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:03,331 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/A in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
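[Annotation] The SortedCompactionPolicy/ExploringCompactionPolicy lines above record how the three flushed store files per family were picked for a minor compaction ("selected 3 files ... with 1 in ratio"). A minimal sketch of that size-ratio test, assuming the stock hbase.hstore.compaction.ratio default of 1.2; this is illustrative code, not the policy's actual implementation:

// Illustrative only: a stripped-down version of the size-ratio check behind the
// "... with 1 in ratio" messages. The real policy also weighs permutations,
// min/max file counts and off-peak ratios; 1.2 is the assumed default ratio.
public final class RatioCheckSketch {

    /** Returns true if every file is no larger than ratio * (sum of the other files). */
    static boolean filesInRatio(double[] fileSizes, double ratio) {
        double total = 0;
        for (double s : fileSizes) {
            total += s;
        }
        for (double s : fileSizes) {
            if (s > (total - s) * ratio) {
                return false;   // one file dominates the selection -> not "in ratio"
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the three B-family store file sizes reported above, in KB.
        double[] bStoreFiles = {12.0, 11.7, 11.9};
        System.out.println("in ratio: " + filesInRatio(bStoreFiles, 1.2));  // prints true
    }
}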
2024-12-13T21:32:03,332 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/6a943d36b84345f791dce9cdaab8f058, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/60cbf8b892d14301a006cf6ca9dd97d7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/12727f2cb83b47319dc63573277f2cb8] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=35.5 K 2024-12-13T21:32:03,332 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1b286a45307e4df9a07a4bfc98e61dc2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d202d7abd8e46bb928948fee54c02a3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/85b3a6b5427c47e98a801d860f25bf2b] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=99.5 K 2024-12-13T21:32:03,332 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:03,332 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1b286a45307e4df9a07a4bfc98e61dc2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d202d7abd8e46bb928948fee54c02a3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/85b3a6b5427c47e98a801d860f25bf2b] 2024-12-13T21:32:03,332 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a943d36b84345f791dce9cdaab8f058, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1734125520121 2024-12-13T21:32:03,332 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b286a45307e4df9a07a4bfc98e61dc2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1734125520121 2024-12-13T21:32:03,332 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 60cbf8b892d14301a006cf6ca9dd97d7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734125520262 2024-12-13T21:32:03,332 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d202d7abd8e46bb928948fee54c02a3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734125520262 2024-12-13T21:32:03,332 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 12727f2cb83b47319dc63573277f2cb8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734125521428 2024-12-13T21:32:03,332 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85b3a6b5427c47e98a801d860f25bf2b, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734125521428 2024-12-13T21:32:03,354 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:03,356 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#B#compaction#324 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:03,356 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/fed4fd7ed5ed44648858f1c66c75d140 is 50, key is test_row_0/B:col10/1734125521433/Put/seqid=0 2024-12-13T21:32:03,358 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241213dc091f7eb06d455d8cbf4aff68f793ae_be31d870e3b01c14f0b712223355e104 store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:03,359 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241213dc091f7eb06d455d8cbf4aff68f793ae_be31d870e3b01c14f0b712223355e104, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:03,360 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213dc091f7eb06d455d8cbf4aff68f793ae_be31d870e3b01c14f0b712223355e104 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:03,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742211_1387 (size=12493) 2024-12-13T21:32:03,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742212_1388 (size=4469) 2024-12-13T21:32:03,369 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#A#compaction#323 average throughput is 1.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:03,369 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/7b0dbaa2d254490986244c1d530c7b60 is 175, key is test_row_0/A:col10/1734125521433/Put/seqid=0 2024-12-13T21:32:03,377 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/fed4fd7ed5ed44648858f1c66c75d140 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/fed4fd7ed5ed44648858f1c66c75d140 2024-12-13T21:32:03,380 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/B of be31d870e3b01c14f0b712223355e104 into fed4fd7ed5ed44648858f1c66c75d140(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
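[Annotation] The mob.DefaultMobStoreCompactor lines above show a MOB writer being created for family A and then aborted "because there are no MOB cells": family A is MOB-enabled in this table, but its 50-byte cells stay below the MOB threshold, while B and C take the default store path. A minimal sketch of how such a family can be declared with the HBase 2.x client API; the 10 KB threshold and the standalone main method are assumptions for illustration, not values taken from this test:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a table descriptor with one MOB-enabled family, mirroring why family A
// is flushed/compacted through DefaultMobStoreFlusher/DefaultMobStoreCompactor above.
public final class MobFamilySketch {
    public static void main(String[] args) {
        ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)          // cells above the threshold go to MOB files
                .setMobThreshold(10 * 1024L)  // assumed threshold; small test cells stay inline
                .build();
        ColumnFamilyDescriptor plainFamily = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("B"))
                .build();
        TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(mobFamily)
                .setColumnFamily(plainFamily)
                .build();
        System.out.println(table);
    }
}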
2024-12-13T21:32:03,380 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:03,380 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/B, priority=13, startTime=1734125523330; duration=0sec 2024-12-13T21:32:03,380 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:03,380 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:B 2024-12-13T21:32:03,381 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:03,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742213_1389 (size=31447) 2024-12-13T21:32:03,382 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:03,382 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/C is initiating minor compaction (all files) 2024-12-13T21:32:03,382 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/C in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:03,382 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/505706b0d93342ae87bc42e8a817198b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/b7b0292b6f874c1096033298e998a2fc, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/903221380b8e40b2888d6359ac72b6cd] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=35.5 K 2024-12-13T21:32:03,382 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 505706b0d93342ae87bc42e8a817198b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1734125520121 2024-12-13T21:32:03,382 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting b7b0292b6f874c1096033298e998a2fc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734125520262 2024-12-13T21:32:03,383 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 903221380b8e40b2888d6359ac72b6cd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734125521428 2024-12-13T21:32:03,387 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#C#compaction#325 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:03,388 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/380987ae6d6f41838d71de81be9d543e is 50, key is test_row_0/C:col10/1734125521433/Put/seqid=0 2024-12-13T21:32:03,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742214_1390 (size=12493) 2024-12-13T21:32:03,394 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/380987ae6d6f41838d71de81be9d543e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/380987ae6d6f41838d71de81be9d543e 2024-12-13T21:32:03,398 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/C of be31d870e3b01c14f0b712223355e104 into 380987ae6d6f41838d71de81be9d543e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:32:03,398 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:03,398 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/C, priority=13, startTime=1734125523331; duration=0sec 2024-12-13T21:32:03,398 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:03,398 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:C 2024-12-13T21:32:03,425 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:03,426 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-13T21:32:03,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:03,426 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-13T21:32:03,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:03,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:03,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:03,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:03,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:03,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:03,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412130919f97ad42747cb99832b93b975ee3a_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125522098/Put/seqid=0 2024-12-13T21:32:03,436 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742215_1391 (size=12304) 2024-12-13T21:32:03,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:03,440 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412130919f97ad42747cb99832b93b975ee3a_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412130919f97ad42747cb99832b93b975ee3a_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:03,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/1fcbea5142604d09a0af8adbb2cf484a, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:03,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/1fcbea5142604d09a0af8adbb2cf484a is 175, key is test_row_0/A:col10/1734125522098/Put/seqid=0 2024-12-13T21:32:03,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742216_1392 (size=31105) 2024-12-13T21:32:03,785 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/7b0dbaa2d254490986244c1d530c7b60 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/7b0dbaa2d254490986244c1d530c7b60 2024-12-13T21:32:03,789 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/A of be31d870e3b01c14f0b712223355e104 into 7b0dbaa2d254490986244c1d530c7b60(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
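[Annotation] The HRegionFileSystem(442) "Committing ... as ..." lines above reflect the flush/compaction commit step: the new HFile is written under the region's .tmp directory and then renamed into the store directory, so readers only ever see complete files. A rough stand-in for that pattern using the plain Hadoop FileSystem API, with hypothetical local paths rather than the HDFS paths from this log:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of "write into .tmp, then rename into the store directory".
// Paths are hypothetical; this runs against the default (local) Hadoop FileSystem.
public final class TmpCommitSketch {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());

        Path tmpFile = new Path("/tmp/region/.tmp/A/flushed-file");   // hypothetical flush output
        Path storeFile = new Path("/tmp/region/A/flushed-file");      // hypothetical final location

        fs.mkdirs(tmpFile.getParent());
        fs.create(tmpFile, true).close();   // stand-in for the flusher writing the HFile
        fs.mkdirs(storeFile.getParent());

        // The commit itself: a single rename makes the file visible to readers of the store.
        boolean committed = fs.rename(tmpFile, storeFile);
        System.out.println("committed=" + committed);
    }
}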
2024-12-13T21:32:03,789 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:03,789 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/A, priority=13, startTime=1734125523330; duration=0sec 2024-12-13T21:32:03,789 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:03,789 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:A 2024-12-13T21:32:03,845 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=171, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/1fcbea5142604d09a0af8adbb2cf484a 2024-12-13T21:32:03,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/89bb805083f0447891e919ec18044a98 is 50, key is test_row_0/B:col10/1734125522098/Put/seqid=0 2024-12-13T21:32:03,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742217_1393 (size=12151) 2024-12-13T21:32:04,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:04,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:04,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125584239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125584239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125584241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125584242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125584242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,254 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/89bb805083f0447891e919ec18044a98 2024-12-13T21:32:04,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a1cbfd87cf6e491097e4fea5172d9fbd is 50, key is test_row_0/C:col10/1734125522098/Put/seqid=0 2024-12-13T21:32:04,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742218_1394 (size=12151) 2024-12-13T21:32:04,266 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a1cbfd87cf6e491097e4fea5172d9fbd 2024-12-13T21:32:04,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/1fcbea5142604d09a0af8adbb2cf484a as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1fcbea5142604d09a0af8adbb2cf484a 2024-12-13T21:32:04,275 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1fcbea5142604d09a0af8adbb2cf484a, entries=150, sequenceid=171, filesize=30.4 K 2024-12-13T21:32:04,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/89bb805083f0447891e919ec18044a98 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/89bb805083f0447891e919ec18044a98 2024-12-13T21:32:04,278 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/89bb805083f0447891e919ec18044a98, entries=150, sequenceid=171, filesize=11.9 K 2024-12-13T21:32:04,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a1cbfd87cf6e491097e4fea5172d9fbd as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a1cbfd87cf6e491097e4fea5172d9fbd 2024-12-13T21:32:04,282 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a1cbfd87cf6e491097e4fea5172d9fbd, entries=150, sequenceid=171, filesize=11.9 K 2024-12-13T21:32:04,282 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for be31d870e3b01c14f0b712223355e104 in 856ms, sequenceid=171, compaction requested=false 2024-12-13T21:32:04,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:04,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
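[Annotation] The repeated RegionTooBusyException / "Over memstore limit=512.0 K" rejections above are the region server pushing back on writers until the in-flight flush drains the memstore. A sketch of an application-level backoff around a single put, assuming the table, row and column names seen in this test; the standard HBase client already retries such failures internally, and whether the exception surfaces unwrapped depends on the client's retry settings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of backing off when a put is rejected with RegionTooBusyException
// ("Over memstore limit"). Backoff numbers are assumed for illustration only.
public final class TooBusyRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break;                       // write accepted, memstore had room
                } catch (RegionTooBusyException e) {
                    // Region is above its blocking memstore limit; wait for a flush to drain it.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;              // simple exponential backoff
                }
            }
        }
    }
}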
2024-12-13T21:32:04,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-12-13T21:32:04,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-12-13T21:32:04,284 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-13T21:32:04,284 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0760 sec 2024-12-13T21:32:04,285 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.0810 sec 2024-12-13T21:32:04,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-13T21:32:04,310 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-13T21:32:04,311 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:32:04,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-12-13T21:32:04,313 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:32:04,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-13T21:32:04,313 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:32:04,313 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:32:04,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:04,350 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-13T21:32:04,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:04,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:04,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:04,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-13T21:32:04,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:04,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:04,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412130cc5908a53f14760aec7d4c418db04f1_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125524349/Put/seqid=0 2024-12-13T21:32:04,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742219_1395 (size=19774) 2024-12-13T21:32:04,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125584375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125584376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125584380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-13T21:32:04,464 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-13T21:32:04,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:04,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:04,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:04,465 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:04,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:04,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:04,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125584482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125584483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125584485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-13T21:32:04,617 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,617 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-13T21:32:04,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:04,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:04,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:04,617 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:04,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:04,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:04,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125584686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125584686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125584692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,768 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-13T21:32:04,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:04,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:04,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:04,768 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:04,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:04,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:04,776 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:04,779 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412130cc5908a53f14760aec7d4c418db04f1_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412130cc5908a53f14760aec7d4c418db04f1_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:04,779 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/8d5ebfcf077d4132867204b4ea6f7bcc, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:04,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/8d5ebfcf077d4132867204b4ea6f7bcc is 175, key is test_row_0/A:col10/1734125524349/Put/seqid=0 2024-12-13T21:32:04,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742220_1396 (size=57033) 2024-12-13T21:32:04,783 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=187, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/8d5ebfcf077d4132867204b4ea6f7bcc 2024-12-13T21:32:04,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/f0f44ba227f24f99ba8656f08b132d74 is 50, key is test_row_0/B:col10/1734125524349/Put/seqid=0 2024-12-13T21:32:04,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742221_1397 (size=12151) 2024-12-13T21:32:04,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-13T21:32:04,920 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-13T21:32:04,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:04,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:04,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:04,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:04,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:04,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:04,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125584991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125584992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:04,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:04,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125584995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:05,072 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:05,073 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-13T21:32:05,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:05,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:05,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:05,073 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:05,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:05,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:05,192 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/f0f44ba227f24f99ba8656f08b132d74 2024-12-13T21:32:05,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/44c01810e0794445a3fee88d7b3f8b12 is 50, key is test_row_0/C:col10/1734125524349/Put/seqid=0 2024-12-13T21:32:05,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742222_1398 (size=12151) 2024-12-13T21:32:05,225 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:05,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-13T21:32:05,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:05,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:05,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:05,226 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:05,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:05,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:05,378 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:05,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-13T21:32:05,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:05,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:05,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:05,378 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:05,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:05,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:05,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-13T21:32:05,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:05,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125585497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:05,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:05,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125585499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:05,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:05,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125585500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:05,529 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:05,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-13T21:32:05,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:05,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:05,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:05,529 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:05,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:05,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:05,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/44c01810e0794445a3fee88d7b3f8b12 2024-12-13T21:32:05,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/8d5ebfcf077d4132867204b4ea6f7bcc as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d5ebfcf077d4132867204b4ea6f7bcc 2024-12-13T21:32:05,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d5ebfcf077d4132867204b4ea6f7bcc, entries=300, sequenceid=187, filesize=55.7 K 2024-12-13T21:32:05,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/f0f44ba227f24f99ba8656f08b132d74 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/f0f44ba227f24f99ba8656f08b132d74 2024-12-13T21:32:05,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/f0f44ba227f24f99ba8656f08b132d74, entries=150, sequenceid=187, filesize=11.9 K 2024-12-13T21:32:05,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/44c01810e0794445a3fee88d7b3f8b12 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/44c01810e0794445a3fee88d7b3f8b12 2024-12-13T21:32:05,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/44c01810e0794445a3fee88d7b3f8b12, entries=150, sequenceid=187, filesize=11.9 K 2024-12-13T21:32:05,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for be31d870e3b01c14f0b712223355e104 in 1266ms, sequenceid=187, compaction requested=true 2024-12-13T21:32:05,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:05,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:32:05,617 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:05,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:05,617 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:05,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:32:05,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:05,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:32:05,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:05,618 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:05,618 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:05,618 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/A is initiating minor compaction (all files) 2024-12-13T21:32:05,618 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/B is initiating minor compaction (all files) 2024-12-13T21:32:05,618 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/B in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:05,618 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/A in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:05,618 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/fed4fd7ed5ed44648858f1c66c75d140, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/89bb805083f0447891e919ec18044a98, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/f0f44ba227f24f99ba8656f08b132d74] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=35.9 K 2024-12-13T21:32:05,618 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/7b0dbaa2d254490986244c1d530c7b60, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1fcbea5142604d09a0af8adbb2cf484a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d5ebfcf077d4132867204b4ea6f7bcc] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=116.8 K 2024-12-13T21:32:05,618 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:05,618 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/7b0dbaa2d254490986244c1d530c7b60, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1fcbea5142604d09a0af8adbb2cf484a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d5ebfcf077d4132867204b4ea6f7bcc] 2024-12-13T21:32:05,618 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting fed4fd7ed5ed44648858f1c66c75d140, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734125521428 2024-12-13T21:32:05,618 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b0dbaa2d254490986244c1d530c7b60, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734125521428 2024-12-13T21:32:05,618 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 89bb805083f0447891e919ec18044a98, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734125522098 2024-12-13T21:32:05,619 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1fcbea5142604d09a0af8adbb2cf484a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734125522098 2024-12-13T21:32:05,619 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting f0f44ba227f24f99ba8656f08b132d74, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1734125524349 2024-12-13T21:32:05,619 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d5ebfcf077d4132867204b4ea6f7bcc, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1734125524240 2024-12-13T21:32:05,624 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:05,624 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#B#compaction#332 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:05,625 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/c9552aeb39cd4e08a2423d039c910d69 is 50, key is test_row_0/B:col10/1734125524349/Put/seqid=0 2024-12-13T21:32:05,627 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241213cdb51777691b4b86b3f24abfd5237618_be31d870e3b01c14f0b712223355e104 store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:05,628 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241213cdb51777691b4b86b3f24abfd5237618_be31d870e3b01c14f0b712223355e104, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:05,628 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213cdb51777691b4b86b3f24abfd5237618_be31d870e3b01c14f0b712223355e104 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:05,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742223_1399 (size=12595) 2024-12-13T21:32:05,649 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/c9552aeb39cd4e08a2423d039c910d69 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/c9552aeb39cd4e08a2423d039c910d69 2024-12-13T21:32:05,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742224_1400 (size=4469) 2024-12-13T21:32:05,651 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#A#compaction#333 average throughput is 0.90 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:05,651 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/bc6a315af18843eca94ca03841bed379 is 175, key is test_row_0/A:col10/1734125524349/Put/seqid=0 2024-12-13T21:32:05,655 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/B of be31d870e3b01c14f0b712223355e104 into c9552aeb39cd4e08a2423d039c910d69(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:32:05,655 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:05,655 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/B, priority=13, startTime=1734125525617; duration=0sec 2024-12-13T21:32:05,655 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:05,655 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:B 2024-12-13T21:32:05,655 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:05,656 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:05,656 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/C is initiating minor compaction (all files) 2024-12-13T21:32:05,656 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/C in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:05,656 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/380987ae6d6f41838d71de81be9d543e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a1cbfd87cf6e491097e4fea5172d9fbd, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/44c01810e0794445a3fee88d7b3f8b12] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=35.9 K 2024-12-13T21:32:05,656 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 380987ae6d6f41838d71de81be9d543e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1734125521428 2024-12-13T21:32:05,657 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a1cbfd87cf6e491097e4fea5172d9fbd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734125522098 2024-12-13T21:32:05,657 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 44c01810e0794445a3fee88d7b3f8b12, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1734125524349 2024-12-13T21:32:05,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 
is added to blk_1073742225_1401 (size=31549) 2024-12-13T21:32:05,663 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#C#compaction#334 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:05,663 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/9f2df22a867f44ee8b334e104154113b is 50, key is test_row_0/C:col10/1734125524349/Put/seqid=0 2024-12-13T21:32:05,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742226_1402 (size=12595) 2024-12-13T21:32:05,667 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/bc6a315af18843eca94ca03841bed379 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bc6a315af18843eca94ca03841bed379 2024-12-13T21:32:05,671 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/A of be31d870e3b01c14f0b712223355e104 into bc6a315af18843eca94ca03841bed379(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:32:05,671 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:05,671 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/A, priority=13, startTime=1734125525617; duration=0sec 2024-12-13T21:32:05,671 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:05,671 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:A 2024-12-13T21:32:05,672 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/9f2df22a867f44ee8b334e104154113b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/9f2df22a867f44ee8b334e104154113b 2024-12-13T21:32:05,675 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/C of be31d870e3b01c14f0b712223355e104 into 9f2df22a867f44ee8b334e104154113b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:32:05,675 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:05,675 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/C, priority=13, startTime=1734125525617; duration=0sec 2024-12-13T21:32:05,675 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:05,675 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:C 2024-12-13T21:32:05,681 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:05,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-13T21:32:05,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:05,682 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-13T21:32:05,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:05,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:05,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:05,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:05,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:05,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:05,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213a8b65df167964a61b49f2394c43cfcc5_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125524379/Put/seqid=0 2024-12-13T21:32:05,689 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742227_1403 (size=12304) 2024-12-13T21:32:05,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:05,693 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213a8b65df167964a61b49f2394c43cfcc5_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213a8b65df167964a61b49f2394c43cfcc5_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:05,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/fadbea4b9df94fb89d47326529588fbe, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:05,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/fadbea4b9df94fb89d47326529588fbe is 175, key is test_row_0/A:col10/1734125524379/Put/seqid=0 2024-12-13T21:32:05,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742228_1404 (size=31105) 2024-12-13T21:32:06,099 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/fadbea4b9df94fb89d47326529588fbe 2024-12-13T21:32:06,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/0d06fa05f58147d6a48742512878086a is 50, key is test_row_0/B:col10/1734125524379/Put/seqid=0 2024-12-13T21:32:06,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742229_1405 (size=12151) 2024-12-13T21:32:06,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:06,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
as already flushing 2024-12-13T21:32:06,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125586275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:06,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125586277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:06,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:06,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125586379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:06,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:06,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125586382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:06,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-13T21:32:06,508 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/0d06fa05f58147d6a48742512878086a 2024-12-13T21:32:06,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:06,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125586505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:06,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:06,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125586509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:06,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/968dc09b281948eda6321b5f1c2bd1f1 is 50, key is test_row_0/C:col10/1734125524379/Put/seqid=0 2024-12-13T21:32:06,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:06,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125586510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:06,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742230_1406 (size=12151) 2024-12-13T21:32:06,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:06,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125586582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:06,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:06,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125586586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:06,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:06,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125586887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:06,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:06,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125586891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:06,930 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/968dc09b281948eda6321b5f1c2bd1f1 2024-12-13T21:32:06,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/fadbea4b9df94fb89d47326529588fbe as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fadbea4b9df94fb89d47326529588fbe 2024-12-13T21:32:06,937 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fadbea4b9df94fb89d47326529588fbe, entries=150, sequenceid=211, filesize=30.4 K 2024-12-13T21:32:06,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/0d06fa05f58147d6a48742512878086a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/0d06fa05f58147d6a48742512878086a 2024-12-13T21:32:06,940 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/0d06fa05f58147d6a48742512878086a, entries=150, sequenceid=211, filesize=11.9 K 2024-12-13T21:32:06,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/968dc09b281948eda6321b5f1c2bd1f1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/968dc09b281948eda6321b5f1c2bd1f1 2024-12-13T21:32:06,944 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/968dc09b281948eda6321b5f1c2bd1f1, entries=150, sequenceid=211, filesize=11.9 K 2024-12-13T21:32:06,945 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for be31d870e3b01c14f0b712223355e104 in 1263ms, sequenceid=211, compaction requested=false 2024-12-13T21:32:06,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:06,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:06,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-12-13T21:32:06,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-12-13T21:32:06,947 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-13T21:32:06,947 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6330 sec 2024-12-13T21:32:06,948 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 2.6360 sec 2024-12-13T21:32:07,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:07,395 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-13T21:32:07,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:07,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:07,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:07,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:07,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:07,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:07,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213ffafd53cdb384195ba769a3244e51e4a_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125527394/Put/seqid=0 2024-12-13T21:32:07,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742231_1407 (size=14794) 2024-12-13T21:32:07,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:07,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125587466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:07,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:07,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125587470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:07,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:07,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125587571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:07,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:07,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125587575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:07,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:07,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125587774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:07,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:07,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125587780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:07,809 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:07,811 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213ffafd53cdb384195ba769a3244e51e4a_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213ffafd53cdb384195ba769a3244e51e4a_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:07,814 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/54f6386cf6384d409c33dc2dd22e1511, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:07,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/54f6386cf6384d409c33dc2dd22e1511 is 175, key is test_row_0/A:col10/1734125527394/Put/seqid=0 2024-12-13T21:32:07,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742232_1408 (size=39749) 2024-12-13T21:32:08,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:08,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125588078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:08,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:08,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125588084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:08,225 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=227, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/54f6386cf6384d409c33dc2dd22e1511 2024-12-13T21:32:08,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/03e506eee76148e58b827f8bb09e79c3 is 50, key is test_row_0/B:col10/1734125527394/Put/seqid=0 2024-12-13T21:32:08,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742233_1409 (size=12151) 2024-12-13T21:32:08,235 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/03e506eee76148e58b827f8bb09e79c3 2024-12-13T21:32:08,241 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a3cebf7b110d4332a824691b8bd73dbd is 50, key is test_row_0/C:col10/1734125527394/Put/seqid=0 2024-12-13T21:32:08,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742234_1410 (size=12151) 2024-12-13T21:32:08,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-13T21:32:08,417 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-13T21:32:08,418 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:32:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-13T21:32:08,419 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-13T21:32:08,419 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:32:08,419 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:32:08,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:32:08,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-13T21:32:08,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:08,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125588520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:08,522 DEBUG [Thread-1568 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:32:08,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:08,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125588525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:08,528 DEBUG [Thread-1564 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:32:08,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:08,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125588530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:08,532 DEBUG [Thread-1572 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:32:08,570 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:08,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-13T21:32:08,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:08,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:08,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:08,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:08,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:08,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:08,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:08,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125588586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:08,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:08,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125588590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:08,645 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a3cebf7b110d4332a824691b8bd73dbd 2024-12-13T21:32:08,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/54f6386cf6384d409c33dc2dd22e1511 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/54f6386cf6384d409c33dc2dd22e1511 2024-12-13T21:32:08,650 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/54f6386cf6384d409c33dc2dd22e1511, entries=200, sequenceid=227, filesize=38.8 K 2024-12-13T21:32:08,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/03e506eee76148e58b827f8bb09e79c3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/03e506eee76148e58b827f8bb09e79c3 2024-12-13T21:32:08,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/03e506eee76148e58b827f8bb09e79c3, entries=150, sequenceid=227, filesize=11.9 K 2024-12-13T21:32:08,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a3cebf7b110d4332a824691b8bd73dbd as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a3cebf7b110d4332a824691b8bd73dbd 2024-12-13T21:32:08,656 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a3cebf7b110d4332a824691b8bd73dbd, entries=150, sequenceid=227, filesize=11.9 K 2024-12-13T21:32:08,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for be31d870e3b01c14f0b712223355e104 in 1262ms, sequenceid=227, compaction requested=true 2024-12-13T21:32:08,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:08,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:32:08,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:08,657 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:08,657 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:08,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:32:08,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:08,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:32:08,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:08,658 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:08,658 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:08,658 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/A is initiating minor compaction (all files) 2024-12-13T21:32:08,658 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/B is initiating minor compaction (all files) 2024-12-13T21:32:08,658 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/B in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:08,658 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/A in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:08,658 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bc6a315af18843eca94ca03841bed379, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fadbea4b9df94fb89d47326529588fbe, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/54f6386cf6384d409c33dc2dd22e1511] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=100.0 K 2024-12-13T21:32:08,658 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/c9552aeb39cd4e08a2423d039c910d69, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/0d06fa05f58147d6a48742512878086a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/03e506eee76148e58b827f8bb09e79c3] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=36.0 K 2024-12-13T21:32:08,658 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:08,658 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bc6a315af18843eca94ca03841bed379, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fadbea4b9df94fb89d47326529588fbe, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/54f6386cf6384d409c33dc2dd22e1511] 2024-12-13T21:32:08,658 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting c9552aeb39cd4e08a2423d039c910d69, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1734125524349 2024-12-13T21:32:08,658 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc6a315af18843eca94ca03841bed379, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1734125524349 2024-12-13T21:32:08,658 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d06fa05f58147d6a48742512878086a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734125524369 2024-12-13T21:32:08,658 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting fadbea4b9df94fb89d47326529588fbe, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734125524369 2024-12-13T21:32:08,659 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 03e506eee76148e58b827f8bb09e79c3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1734125526265 2024-12-13T21:32:08,659 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54f6386cf6384d409c33dc2dd22e1511, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1734125526265 2024-12-13T21:32:08,663 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:08,664 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#B#compaction#341 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:08,664 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/9b9fd99238654f009f0e92aaa281a458 is 50, key is test_row_0/B:col10/1734125527394/Put/seqid=0 2024-12-13T21:32:08,665 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412134d5205bf9ae54b0a86c46aefaaafa435_be31d870e3b01c14f0b712223355e104 store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:08,667 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412134d5205bf9ae54b0a86c46aefaaafa435_be31d870e3b01c14f0b712223355e104, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:08,667 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412134d5205bf9ae54b0a86c46aefaaafa435_be31d870e3b01c14f0b712223355e104 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:08,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742235_1411 (size=12697) 2024-12-13T21:32:08,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742236_1412 (size=4469) 2024-12-13T21:32:08,689 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#A#compaction#342 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:08,689 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/faa24a2f089a4bdaaaf464c4b18ff131 is 175, key is test_row_0/A:col10/1734125527394/Put/seqid=0 2024-12-13T21:32:08,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742237_1413 (size=31651) 2024-12-13T21:32:08,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-13T21:32:08,723 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:08,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-13T21:32:08,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:08,723 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-13T21:32:08,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:08,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:08,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:08,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:08,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:08,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:08,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412134eef209b323a4b2382265e2e01941d0b_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125527469/Put/seqid=0 2024-12-13T21:32:08,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to 
blk_1073742238_1414 (size=12304) 2024-12-13T21:32:08,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:08,739 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412134eef209b323a4b2382265e2e01941d0b_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412134eef209b323a4b2382265e2e01941d0b_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:08,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/a1adcc55b6214bb2bc47ef622375969a, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:08,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/a1adcc55b6214bb2bc47ef622375969a is 175, key is test_row_0/A:col10/1734125527469/Put/seqid=0 2024-12-13T21:32:08,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742239_1415 (size=31105) 2024-12-13T21:32:09,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-13T21:32:09,078 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/9b9fd99238654f009f0e92aaa281a458 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/9b9fd99238654f009f0e92aaa281a458 2024-12-13T21:32:09,081 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/B of be31d870e3b01c14f0b712223355e104 into 9b9fd99238654f009f0e92aaa281a458(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:32:09,081 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:09,081 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/B, priority=13, startTime=1734125528657; duration=0sec 2024-12-13T21:32:09,081 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:09,081 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:B 2024-12-13T21:32:09,081 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:09,082 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:09,082 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/C is initiating minor compaction (all files) 2024-12-13T21:32:09,082 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/C in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:09,082 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/9f2df22a867f44ee8b334e104154113b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/968dc09b281948eda6321b5f1c2bd1f1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a3cebf7b110d4332a824691b8bd73dbd] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=36.0 K 2024-12-13T21:32:09,082 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f2df22a867f44ee8b334e104154113b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1734125524349 2024-12-13T21:32:09,082 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 968dc09b281948eda6321b5f1c2bd1f1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734125524369 2024-12-13T21:32:09,083 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a3cebf7b110d4332a824691b8bd73dbd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1734125526265 2024-12-13T21:32:09,090 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
be31d870e3b01c14f0b712223355e104#C#compaction#344 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:09,090 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a01f669fcfde42e1b84157b8f1bbd444 is 50, key is test_row_0/C:col10/1734125527394/Put/seqid=0 2024-12-13T21:32:09,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742240_1416 (size=12697) 2024-12-13T21:32:09,097 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/faa24a2f089a4bdaaaf464c4b18ff131 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/faa24a2f089a4bdaaaf464c4b18ff131 2024-12-13T21:32:09,101 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/A of be31d870e3b01c14f0b712223355e104 into faa24a2f089a4bdaaaf464c4b18ff131(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:32:09,101 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:09,101 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/A, priority=13, startTime=1734125528657; duration=0sec 2024-12-13T21:32:09,102 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:09,102 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:A 2024-12-13T21:32:09,144 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=247, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/a1adcc55b6214bb2bc47ef622375969a 2024-12-13T21:32:09,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/96e908ad70ea431ebf007f4181c505c1 is 50, key is test_row_0/B:col10/1734125527469/Put/seqid=0 2024-12-13T21:32:09,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742241_1417 (size=12151) 
2024-12-13T21:32:09,154 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/96e908ad70ea431ebf007f4181c505c1 2024-12-13T21:32:09,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/e51092f385514d8abe5faf7be7473c5a is 50, key is test_row_0/C:col10/1734125527469/Put/seqid=0 2024-12-13T21:32:09,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742242_1418 (size=12151) 2024-12-13T21:32:09,505 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/a01f669fcfde42e1b84157b8f1bbd444 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a01f669fcfde42e1b84157b8f1bbd444 2024-12-13T21:32:09,511 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/C of be31d870e3b01c14f0b712223355e104 into a01f669fcfde42e1b84157b8f1bbd444(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:32:09,511 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:09,511 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/C, priority=13, startTime=1734125528657; duration=0sec 2024-12-13T21:32:09,511 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:09,511 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:C 2024-12-13T21:32:09,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-13T21:32:09,564 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/e51092f385514d8abe5faf7be7473c5a 2024-12-13T21:32:09,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/a1adcc55b6214bb2bc47ef622375969a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/a1adcc55b6214bb2bc47ef622375969a 2024-12-13T21:32:09,573 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/a1adcc55b6214bb2bc47ef622375969a, entries=150, sequenceid=247, filesize=30.4 K 2024-12-13T21:32:09,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/96e908ad70ea431ebf007f4181c505c1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/96e908ad70ea431ebf007f4181c505c1 2024-12-13T21:32:09,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,574 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,576 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/96e908ad70ea431ebf007f4181c505c1, entries=150, sequenceid=247, filesize=11.9 K 2024-12-13T21:32:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/e51092f385514d8abe5faf7be7473c5a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/e51092f385514d8abe5faf7be7473c5a 2024-12-13T21:32:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
[2024-12-13T21:32:09,578 to 21:32:09,585: repeated DEBUG entries from RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (port=38989), each reading "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", condensed here; the distinct entries interleaved with them are kept below, one per line.]
2024-12-13T21:32:09,580 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/e51092f385514d8abe5faf7be7473c5a, entries=150, sequenceid=247, filesize=11.9 K
2024-12-13T21:32:09,581 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=0 B/0 for be31d870e3b01c14f0b712223355e104 in 858ms, sequenceid=247, compaction requested=false
2024-12-13T21:32:09,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104:
2024-12-13T21:32:09,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.
2024-12-13T21:32:09,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119
2024-12-13T21:32:09,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=119
2024-12-13T21:32:09,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118
2024-12-13T21:32:09,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1630 sec
2024-12-13T21:32:09,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 1.1650 sec
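[Editor's note: the pid=118/pid=119 entries above record a FlushTableProcedure for TestAcidGuarantees finishing together with its per-region FlushRegionProcedure. For readers who want to reproduce this kind of flush outside the test, below is a minimal client-side sketch using the standard HBase client API; it assumes a reachable cluster described by hbase-site.xml on the classpath and is illustrative only, not the code this test actually runs.]

// Illustrative only: request a table flush through the HBase Admin API.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws IOException {
    // Reads hbase-site.xml from the classpath; assumed to point at the running cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the cluster to flush every region of the table. In this test's log,
      // a table flush surfaces as FlushTableProcedure (pid=118) with a
      // FlushRegionProcedure (pid=119) subprocedure for the region.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}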
[2024-12-13T21:32:09,585 to 21:32:09,611: further repeated DEBUG entries from the same RpcServer.default.FPBQ.Fifo handlers (port=38989), all reading "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker"; condensed, no other entries occur in this stretch.]
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:09,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:09,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-13T21:32:09,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:09,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:09,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:09,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:09,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:09,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:09,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412139364a0a350b04ccca38a7c3ef9ebf93e_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125529693/Put/seqid=0 2024-12-13T21:32:09,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46537 is added to blk_1073742243_1419 (size=14994) 2024-12-13T21:32:09,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:09,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125589782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:09,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:09,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125589782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:09,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:09,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125589887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:09,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:09,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125589887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:10,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:10,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125590091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:10,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:10,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125590092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:10,126 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:10,129 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412139364a0a350b04ccca38a7c3ef9ebf93e_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412139364a0a350b04ccca38a7c3ef9ebf93e_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:10,129 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/bfeb1cbec21a488db73c28f75989e229, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:10,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/bfeb1cbec21a488db73c28f75989e229 is 175, key is test_row_0/A:col10/1734125529693/Put/seqid=0 2024-12-13T21:32:10,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742244_1420 (size=39949) 2024-12-13T21:32:10,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:10,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125590395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:10,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:10,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125590396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:10,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-13T21:32:10,523 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-13T21:32:10,524 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:32:10,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-12-13T21:32:10,550 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=262, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/bfeb1cbec21a488db73c28f75989e229 2024-12-13T21:32:10,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-13T21:32:10,551 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:32:10,551 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:32:10,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:32:10,567 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/b5e47c7d50b14165b71fd09c55759881 is 50, key is test_row_0/B:col10/1734125529693/Put/seqid=0 2024-12-13T21:32:10,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742245_1421 
(size=12301) 2024-12-13T21:32:10,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-13T21:32:10,705 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:10,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-13T21:32:10,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:10,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:10,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:10,705 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:10,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:10,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:10,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-13T21:32:10,857 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:10,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-13T21:32:10,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:10,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:10,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:10,858 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:10,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:10,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:10,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:10,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125590899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:10,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:10,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125590901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:11,001 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/b5e47c7d50b14165b71fd09c55759881 2024-12-13T21:32:11,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/8193c2721bf04d5e8ca62ac71cb602b5 is 50, key is test_row_0/C:col10/1734125529693/Put/seqid=0 2024-12-13T21:32:11,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742246_1422 (size=12301) 2024-12-13T21:32:11,010 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:11,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-13T21:32:11,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:11,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:11,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:11,010 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:11,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:11,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-13T21:32:11,162 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:11,162 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-13T21:32:11,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:11,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:11,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:11,163 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:11,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:11,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:11,314 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:11,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-13T21:32:11,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:11,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:11,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:11,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:11,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:11,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/8193c2721bf04d5e8ca62ac71cb602b5 2024-12-13T21:32:11,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/bfeb1cbec21a488db73c28f75989e229 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bfeb1cbec21a488db73c28f75989e229 2024-12-13T21:32:11,416 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bfeb1cbec21a488db73c28f75989e229, entries=200, sequenceid=262, filesize=39.0 K 2024-12-13T21:32:11,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/b5e47c7d50b14165b71fd09c55759881 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/b5e47c7d50b14165b71fd09c55759881 2024-12-13T21:32:11,419 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/b5e47c7d50b14165b71fd09c55759881, entries=150, 
sequenceid=262, filesize=12.0 K 2024-12-13T21:32:11,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/8193c2721bf04d5e8ca62ac71cb602b5 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/8193c2721bf04d5e8ca62ac71cb602b5 2024-12-13T21:32:11,423 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/8193c2721bf04d5e8ca62ac71cb602b5, entries=150, sequenceid=262, filesize=12.0 K 2024-12-13T21:32:11,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for be31d870e3b01c14f0b712223355e104 in 1715ms, sequenceid=262, compaction requested=true 2024-12-13T21:32:11,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:11,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:32:11,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:11,424 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:11,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:32:11,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:11,424 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:11,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:32:11,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:11,424 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:11,424 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102705 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:11,424 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] 
regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/A is initiating minor compaction (all files) 2024-12-13T21:32:11,424 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/B is initiating minor compaction (all files) 2024-12-13T21:32:11,425 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/B in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:11,425 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/A in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:11,425 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/9b9fd99238654f009f0e92aaa281a458, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/96e908ad70ea431ebf007f4181c505c1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/b5e47c7d50b14165b71fd09c55759881] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=36.3 K 2024-12-13T21:32:11,425 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/faa24a2f089a4bdaaaf464c4b18ff131, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/a1adcc55b6214bb2bc47ef622375969a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bfeb1cbec21a488db73c28f75989e229] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=100.3 K 2024-12-13T21:32:11,425 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:11,425 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/faa24a2f089a4bdaaaf464c4b18ff131, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/a1adcc55b6214bb2bc47ef622375969a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bfeb1cbec21a488db73c28f75989e229] 2024-12-13T21:32:11,425 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b9fd99238654f009f0e92aaa281a458, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1734125526265 2024-12-13T21:32:11,425 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting faa24a2f089a4bdaaaf464c4b18ff131, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1734125526265 2024-12-13T21:32:11,425 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 96e908ad70ea431ebf007f4181c505c1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734125527428 2024-12-13T21:32:11,425 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1adcc55b6214bb2bc47ef622375969a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734125527428 2024-12-13T21:32:11,425 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting b5e47c7d50b14165b71fd09c55759881, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734125529693 2024-12-13T21:32:11,426 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfeb1cbec21a488db73c28f75989e229, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734125529693 2024-12-13T21:32:11,430 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:11,431 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#B#compaction#350 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:11,431 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/3bc3596e253c49a497a99c793aac4a4c is 50, key is test_row_0/B:col10/1734125529693/Put/seqid=0 2024-12-13T21:32:11,432 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241213d18fb56634634acdb30972065a4ea140_be31d870e3b01c14f0b712223355e104 store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:11,433 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241213d18fb56634634acdb30972065a4ea140_be31d870e3b01c14f0b712223355e104, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:11,433 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213d18fb56634634acdb30972065a4ea140_be31d870e3b01c14f0b712223355e104 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:11,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742247_1423 (size=12949) 2024-12-13T21:32:11,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742248_1424 (size=4469) 2024-12-13T21:32:11,467 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:11,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-13T21:32:11,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:11,467 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-13T21:32:11,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:11,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:11,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:11,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:11,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:11,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:11,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213a02cd6fbc25f4f44847b5d9af3661815_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125529781/Put/seqid=0 2024-12-13T21:32:11,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742249_1425 (size=12454) 2024-12-13T21:32:11,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-13T21:32:11,837 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#A#compaction#351 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:11,838 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/af4e404b7b024a04bc38825f032eec78 is 175, key is test_row_0/A:col10/1734125529693/Put/seqid=0 2024-12-13T21:32:11,850 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/3bc3596e253c49a497a99c793aac4a4c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/3bc3596e253c49a497a99c793aac4a4c 2024-12-13T21:32:11,860 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/B of be31d870e3b01c14f0b712223355e104 into 3bc3596e253c49a497a99c793aac4a4c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:32:11,860 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:11,860 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/B, priority=13, startTime=1734125531424; duration=0sec 2024-12-13T21:32:11,860 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:11,860 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:B 2024-12-13T21:32:11,860 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:11,862 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:11,862 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/C is initiating minor compaction (all files) 2024-12-13T21:32:11,862 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/C in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:11,862 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a01f669fcfde42e1b84157b8f1bbd444, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/e51092f385514d8abe5faf7be7473c5a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/8193c2721bf04d5e8ca62ac71cb602b5] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=36.3 K 2024-12-13T21:32:11,863 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a01f669fcfde42e1b84157b8f1bbd444, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1734125526265 2024-12-13T21:32:11,863 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting e51092f385514d8abe5faf7be7473c5a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734125527428 2024-12-13T21:32:11,864 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 8193c2721bf04d5e8ca62ac71cb602b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734125529693 2024-12-13T21:32:11,877 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#C#compaction#353 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:11,877 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/bfa7eb49533244f78f2f005368fbdba2 is 50, key is test_row_0/C:col10/1734125529693/Put/seqid=0 2024-12-13T21:32:11,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:11,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742250_1426 (size=31903) 2024-12-13T21:32:11,886 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213a02cd6fbc25f4f44847b5d9af3661815_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213a02cd6fbc25f4f44847b5d9af3661815_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:11,887 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/af4e404b7b024a04bc38825f032eec78 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/af4e404b7b024a04bc38825f032eec78 2024-12-13T21:32:11,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/edf766ed44f24531abf2a2d5d8dc0044, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:11,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/edf766ed44f24531abf2a2d5d8dc0044 is 175, key is test_row_0/A:col10/1734125529781/Put/seqid=0 2024-12-13T21:32:11,902 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/A of be31d870e3b01c14f0b712223355e104 into af4e404b7b024a04bc38825f032eec78(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:32:11,902 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:11,902 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/A, priority=13, startTime=1734125531424; duration=0sec 2024-12-13T21:32:11,902 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:11,902 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:A 2024-12-13T21:32:11,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:11,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:11,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742251_1427 (size=12949) 2024-12-13T21:32:11,922 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/bfa7eb49533244f78f2f005368fbdba2 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/bfa7eb49533244f78f2f005368fbdba2 2024-12-13T21:32:11,927 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/C of be31d870e3b01c14f0b712223355e104 into bfa7eb49533244f78f2f005368fbdba2(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:32:11,927 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:11,927 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/C, priority=13, startTime=1734125531424; duration=0sec 2024-12-13T21:32:11,927 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:11,927 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:C 2024-12-13T21:32:11,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742252_1428 (size=31255) 2024-12-13T21:32:11,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:11,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:11,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125591935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:11,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125591935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:12,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:12,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125592044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:12,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:12,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125592044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:12,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:12,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125592247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:12,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:12,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125592248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:12,329 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=287, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/edf766ed44f24531abf2a2d5d8dc0044 2024-12-13T21:32:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/27ea084cc2894c53b8538b2d07814469 is 50, key is test_row_0/B:col10/1734125529781/Put/seqid=0 2024-12-13T21:32:12,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742253_1429 (size=12301) 2024-12-13T21:32:12,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:12,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40812 deadline: 1734125592547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:12,550 DEBUG [Thread-1572 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:32:12,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:12,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125592552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:12,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:12,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125592552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:12,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:12,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40876 deadline: 1734125592552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:12,555 DEBUG [Thread-1564 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:32:12,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:12,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40892 deadline: 1734125592556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:12,559 DEBUG [Thread-1568 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8184 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:32:12,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-13T21:32:12,739 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/27ea084cc2894c53b8538b2d07814469 2024-12-13T21:32:12,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/7d2f80b934ab4473b4b037fe57544037 is 50, key is test_row_0/C:col10/1734125529781/Put/seqid=0 2024-12-13T21:32:12,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742254_1430 (size=12301) 2024-12-13T21:32:12,748 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/7d2f80b934ab4473b4b037fe57544037 2024-12-13T21:32:12,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/edf766ed44f24531abf2a2d5d8dc0044 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/edf766ed44f24531abf2a2d5d8dc0044 2024-12-13T21:32:12,754 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/edf766ed44f24531abf2a2d5d8dc0044, entries=150, sequenceid=287, filesize=30.5 K 2024-12-13T21:32:12,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/27ea084cc2894c53b8538b2d07814469 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/27ea084cc2894c53b8538b2d07814469 2024-12-13T21:32:12,756 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T21:32:12,758 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/27ea084cc2894c53b8538b2d07814469, entries=150, sequenceid=287, filesize=12.0 K
2024-12-13T21:32:12,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/7d2f80b934ab4473b4b037fe57544037 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/7d2f80b934ab4473b4b037fe57544037
2024-12-13T21:32:12,762 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/7d2f80b934ab4473b4b037fe57544037, entries=150, sequenceid=287, filesize=12.0 K
2024-12-13T21:32:12,763 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for be31d870e3b01c14f0b712223355e104 in 1296ms, sequenceid=287, compaction requested=false
2024-12-13T21:32:12,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104:
2024-12-13T21:32:12,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.
2024-12-13T21:32:12,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121
2024-12-13T21:32:12,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=121
2024-12-13T21:32:12,765 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120
2024-12-13T21:32:12,765 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2130 sec
2024-12-13T21:32:12,766 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 2.2420 sec
2024-12-13T21:32:12,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [... identical DEBUG message repeated continuously between 2024-12-13T21:32:12,823 and 2024-12-13T21:32:12,868 across RpcServer.default.FPBQ.Fifo handlers 0-2, queue=0, port=38989 ...] 2024-12-13T21:32:12,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... same DEBUG message "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=38989) from 2024-12-13T21:32:12,896 through 2024-12-13T21:32:12,940 ...]
2024-12-13T21:32:12,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:12,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[identical DEBUG entry repeated by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (port 38989) from 2024-12-13T21:32:12,969 through 2024-12-13T21:32:13,014 — storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker]
2024-12-13T21:32:13,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:13,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:32:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:13,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:13,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:13,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,083 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213d96116249ffd4ae6b6451207fe71682f_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125533074/Put/seqid=0 2024-12-13T21:32:13,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742255_1431 (size=14994) 2024-12-13T21:32:13,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,090 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,091 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,094 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213d96116249ffd4ae6b6451207fe71682f_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213d96116249ffd4ae6b6451207fe71682f_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:13,095 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/78688a92576741b18ff781eb832bb9d6, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:13,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/78688a92576741b18ff781eb832bb9d6 is 175, key is test_row_0/A:col10/1734125533074/Put/seqid=0 2024-12-13T21:32:13,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:13,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742256_1432 (size=39949) 2024-12-13T21:32:13,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:13,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125593156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:13,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:13,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125593156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:13,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:13,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125593260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:13,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:13,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125593262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:13,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:13,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125593465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:13,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:13,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125593466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:13,510 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=303, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/78688a92576741b18ff781eb832bb9d6 2024-12-13T21:32:13,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/2f64358106a54eb9b99e502140dfed68 is 50, key is test_row_0/B:col10/1734125533074/Put/seqid=0 2024-12-13T21:32:13,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742257_1433 (size=12301) 2024-12-13T21:32:13,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:13,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125593769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:13,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:13,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125593769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:13,920 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/2f64358106a54eb9b99e502140dfed68 2024-12-13T21:32:13,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/f87d8d641b9d40d0b70e3de6d1b45060 is 50, key is test_row_0/C:col10/1734125533074/Put/seqid=0 2024-12-13T21:32:13,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742258_1434 (size=12301) 2024-12-13T21:32:14,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:14,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125594272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:14,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:14,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125594274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:14,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/f87d8d641b9d40d0b70e3de6d1b45060 2024-12-13T21:32:14,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/78688a92576741b18ff781eb832bb9d6 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/78688a92576741b18ff781eb832bb9d6 2024-12-13T21:32:14,334 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/78688a92576741b18ff781eb832bb9d6, entries=200, sequenceid=303, filesize=39.0 K 2024-12-13T21:32:14,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/2f64358106a54eb9b99e502140dfed68 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/2f64358106a54eb9b99e502140dfed68 2024-12-13T21:32:14,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/2f64358106a54eb9b99e502140dfed68, entries=150, sequenceid=303, filesize=12.0 K 2024-12-13T21:32:14,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/f87d8d641b9d40d0b70e3de6d1b45060 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/f87d8d641b9d40d0b70e3de6d1b45060 2024-12-13T21:32:14,340 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/f87d8d641b9d40d0b70e3de6d1b45060, entries=150, sequenceid=303, filesize=12.0 K 2024-12-13T21:32:14,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for be31d870e3b01c14f0b712223355e104 in 1266ms, sequenceid=303, compaction requested=true 2024-12-13T21:32:14,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:14,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:32:14,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:14,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:32:14,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:14,341 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:14,341 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:14,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:32:14,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:14,342 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:14,342 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:14,342 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/B is initiating minor compaction (all files) 2024-12-13T21:32:14,342 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/A is initiating minor compaction (all files) 2024-12-13T21:32:14,342 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/B in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
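[editor's note] The compaction selection recorded above (SortedCompactionPolicy considering 3 store files, ExploringCompactionPolicy selecting 3 files of size 37551 for family B) is driven by a size-ratio rule: a set of files is acceptable when no file is larger than the sum of the other files times the configured ratio. The sketch below is a rough illustration of that check only, not the actual HBase policy code; it assumes the default compaction ratio of 1.2 and uses the three B-family file sizes from this log (12949 + 12301 + 12301 = 37551 bytes).

import java.util.List;

public class RatioSelectionSketch {
    // Hypothetical helper mirroring the ratio condition described in the log:
    // every file must be <= (sum of the other files) * ratio.
    static boolean fitsRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // The three B-family store files selected above (~12.6 K, 12.0 K, 12.0 K).
        List<Long> sizes = List.of(12_949L, 12_301L, 12_301L);
        System.out.println(fitsRatio(sizes, 1.2)); // true -> all three are eligible together
    }
}

With three similarly sized files the condition holds, which is why the policy reports selecting all 3 eligible files after considering a single permutation.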
2024-12-13T21:32:14,342 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/A in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:14,342 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/af4e404b7b024a04bc38825f032eec78, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/edf766ed44f24531abf2a2d5d8dc0044, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/78688a92576741b18ff781eb832bb9d6] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=100.7 K 2024-12-13T21:32:14,342 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/3bc3596e253c49a497a99c793aac4a4c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/27ea084cc2894c53b8538b2d07814469, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/2f64358106a54eb9b99e502140dfed68] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=36.7 K 2024-12-13T21:32:14,342 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:14,342 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/af4e404b7b024a04bc38825f032eec78, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/edf766ed44f24531abf2a2d5d8dc0044, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/78688a92576741b18ff781eb832bb9d6] 2024-12-13T21:32:14,342 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bc3596e253c49a497a99c793aac4a4c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734125529693 2024-12-13T21:32:14,343 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 27ea084cc2894c53b8538b2d07814469, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1734125529760 2024-12-13T21:32:14,343 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting af4e404b7b024a04bc38825f032eec78, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734125529693 2024-12-13T21:32:14,343 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f64358106a54eb9b99e502140dfed68, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734125531934 2024-12-13T21:32:14,343 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting edf766ed44f24531abf2a2d5d8dc0044, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1734125529760 2024-12-13T21:32:14,344 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78688a92576741b18ff781eb832bb9d6, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734125531927 2024-12-13T21:32:14,349 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#B#compaction#359 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:14,349 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:14,349 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/0112a1e72fb241d496e01c6a126ba257 is 50, key is test_row_0/B:col10/1734125533074/Put/seqid=0 2024-12-13T21:32:14,351 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241213ffd358cd1c874ccbaa67635dc6bfe3cc_be31d870e3b01c14f0b712223355e104 store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:14,353 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241213ffd358cd1c874ccbaa67635dc6bfe3cc_be31d870e3b01c14f0b712223355e104, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:14,353 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213ffd358cd1c874ccbaa67635dc6bfe3cc_be31d870e3b01c14f0b712223355e104 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:14,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742259_1435 (size=13051) 2024-12-13T21:32:14,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742260_1436 (size=4469) 2024-12-13T21:32:14,367 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#A#compaction#360 average throughput is 1.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:14,368 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/0fb2d61128984fdfbbae1d48d56ee616 is 175, key is test_row_0/A:col10/1734125533074/Put/seqid=0 2024-12-13T21:32:14,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742261_1437 (size=32005) 2024-12-13T21:32:14,387 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/0fb2d61128984fdfbbae1d48d56ee616 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/0fb2d61128984fdfbbae1d48d56ee616 2024-12-13T21:32:14,390 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/A of be31d870e3b01c14f0b712223355e104 into 0fb2d61128984fdfbbae1d48d56ee616(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:32:14,390 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:14,390 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/A, priority=13, startTime=1734125534341; duration=0sec 2024-12-13T21:32:14,390 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:14,390 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:A 2024-12-13T21:32:14,390 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:14,391 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:14,391 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/C is initiating minor compaction (all files) 2024-12-13T21:32:14,391 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/C in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
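[editor's note] The PressureAwareThroughputController messages in this run report a 50.00 MB/second limit with zero sleeps, i.e. these small compactions never hit the throttle. As a rough sketch only (not the actual controller, whose behavior also adapts to memstore pressure), the underlying idea is a byte-rate limiter that sleeps the writer whenever the observed rate exceeds the cap:

// Illustrative byte-rate limiter; names and structure are hypothetical.
public class SimpleThroughputLimiter {
    private final long maxBytesPerSecond;
    private final long windowStart = System.nanoTime();
    private long bytesWritten = 0;

    public SimpleThroughputLimiter(long maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    // Call after writing `bytes`; sleeps when the average rate so far exceeds the limit.
    public synchronized void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSec = (System.nanoTime() - windowStart) / 1e9;
        double minSecondsNeeded = (double) bytesWritten / maxBytesPerSecond;
        if (minSecondsNeeded > elapsedSec) {
            Thread.sleep((long) ((minSecondsNeeded - elapsedSec) * 1000));
        }
    }
}

In this log the compactions move only a few tens of kilobytes, so the computed sleep is always zero ("slept 0 time(s)").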
2024-12-13T21:32:14,391 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/bfa7eb49533244f78f2f005368fbdba2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/7d2f80b934ab4473b4b037fe57544037, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/f87d8d641b9d40d0b70e3de6d1b45060] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=36.7 K 2024-12-13T21:32:14,391 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfa7eb49533244f78f2f005368fbdba2, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1734125529693 2024-12-13T21:32:14,392 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d2f80b934ab4473b4b037fe57544037, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1734125529760 2024-12-13T21:32:14,392 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting f87d8d641b9d40d0b70e3de6d1b45060, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734125531934 2024-12-13T21:32:14,397 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#C#compaction#361 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:14,397 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/c159bc7ef08b4bdd81a4c06c505e2755 is 50, key is test_row_0/C:col10/1734125533074/Put/seqid=0 2024-12-13T21:32:14,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742262_1438 (size=13051) 2024-12-13T21:32:14,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-13T21:32:14,655 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-13T21:32:14,656 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:32:14,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-12-13T21:32:14,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-13T21:32:14,657 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:32:14,657 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:32:14,658 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:32:14,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-13T21:32:14,760 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/0112a1e72fb241d496e01c6a126ba257 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/0112a1e72fb241d496e01c6a126ba257 2024-12-13T21:32:14,763 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/B of be31d870e3b01c14f0b712223355e104 into 0112a1e72fb241d496e01c6a126ba257(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
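[editor's note] The flush cycle above is driven from the client side: HBaseAdmin reports "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed", and the master immediately stores a new FlushTableProcedure (pid=122) that fans out a FlushRegionProcedure (pid=123) to the region server. For orientation, a minimal client-side sketch of issuing such a flush through the public Admin API is shown below; the connection setup is generic and not taken from this test harness.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Ask the master to flush every region of the table, as seen in the log above;
            // the master turns this into a FlushTableProcedure with per-region subprocedures.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}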
2024-12-13T21:32:14,763 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:14,763 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/B, priority=13, startTime=1734125534341; duration=0sec 2024-12-13T21:32:14,763 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:14,763 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:B 2024-12-13T21:32:14,809 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:14,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-13T21:32:14,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:14,810 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-13T21:32:14,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:14,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:14,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:14,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:14,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:14,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:14,811 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/c159bc7ef08b4bdd81a4c06c505e2755 as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/c159bc7ef08b4bdd81a4c06c505e2755 2024-12-13T21:32:14,814 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/C of be31d870e3b01c14f0b712223355e104 into c159bc7ef08b4bdd81a4c06c505e2755(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:32:14,814 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:14,814 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/C, priority=13, startTime=1734125534341; duration=0sec 2024-12-13T21:32:14,815 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:14,815 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:C 2024-12-13T21:32:14,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213231d29d2135b42409cd0fa82e5d3bff0_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125533154/Put/seqid=0 2024-12-13T21:32:14,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742263_1439 (size=12454) 2024-12-13T21:32:14,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:14,821 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213231d29d2135b42409cd0fa82e5d3bff0_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213231d29d2135b42409cd0fa82e5d3bff0_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:14,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/58b1bd9e577347d2b1c2c0dae32456b0, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:14,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/58b1bd9e577347d2b1c2c0dae32456b0 is 175, key is test_row_0/A:col10/1734125533154/Put/seqid=0 2024-12-13T21:32:14,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742264_1440 (size=31255) 2024-12-13T21:32:14,826 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=329, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/58b1bd9e577347d2b1c2c0dae32456b0 2024-12-13T21:32:14,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/d5243cf7d580417b8f9af2d2f584e31e is 50, key is test_row_0/B:col10/1734125533154/Put/seqid=0 2024-12-13T21:32:14,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742265_1441 (size=12301) 2024-12-13T21:32:14,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-13T21:32:15,235 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/d5243cf7d580417b8f9af2d2f584e31e 2024-12-13T21:32:15,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/5a44107a9740429a92dcb4723d20c8f6 is 50, key is test_row_0/C:col10/1734125533154/Put/seqid=0 2024-12-13T21:32:15,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742266_1442 (size=12301) 2024-12-13T21:32:15,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-13T21:32:15,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:15,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:15,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:15,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125595294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:15,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:15,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125595294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:15,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:15,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125595395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:15,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:15,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125595397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:15,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:15,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125595599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:15,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:15,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125595600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:15,644 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/5a44107a9740429a92dcb4723d20c8f6 2024-12-13T21:32:15,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/58b1bd9e577347d2b1c2c0dae32456b0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/58b1bd9e577347d2b1c2c0dae32456b0 2024-12-13T21:32:15,649 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/58b1bd9e577347d2b1c2c0dae32456b0, entries=150, sequenceid=329, filesize=30.5 K 2024-12-13T21:32:15,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/d5243cf7d580417b8f9af2d2f584e31e as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/d5243cf7d580417b8f9af2d2f584e31e 2024-12-13T21:32:15,653 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/d5243cf7d580417b8f9af2d2f584e31e, entries=150, sequenceid=329, filesize=12.0 K 2024-12-13T21:32:15,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/5a44107a9740429a92dcb4723d20c8f6 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5a44107a9740429a92dcb4723d20c8f6 2024-12-13T21:32:15,656 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5a44107a9740429a92dcb4723d20c8f6, entries=150, sequenceid=329, filesize=12.0 K 2024-12-13T21:32:15,657 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for be31d870e3b01c14f0b712223355e104 in 847ms, sequenceid=329, compaction requested=false 2024-12-13T21:32:15,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:15,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:15,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-13T21:32:15,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-13T21:32:15,658 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-13T21:32:15,658 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0000 sec 2024-12-13T21:32:15,660 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.0030 sec 2024-12-13T21:32:15,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-13T21:32:15,759 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-13T21:32:15,760 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:32:15,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-13T21:32:15,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-13T21:32:15,761 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:32:15,762 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:32:15,762 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:32:15,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-13T21:32:15,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:15,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-13T21:32:15,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:15,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:15,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:15,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:15,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:15,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:15,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412130e96ecdac1d049b6afd90af9304bef7c_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125535906/Put/seqid=0 2024-12-13T21:32:15,915 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:15,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-13T21:32:15,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:15,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:15,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:15,915 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:15,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:15,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:15,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742267_1443 (size=14994) 2024-12-13T21:32:15,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:15,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125595952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:15,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:15,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125595952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:16,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125596057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:16,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125596057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-13T21:32:16,067 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-13T21:32:16,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:16,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:16,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:16,068 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,219 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-13T21:32:16,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:16,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:16,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:16,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:16,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125596260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:16,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125596261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,277 DEBUG [Thread-1579 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d3b05cf to 127.0.0.1:57927 2024-12-13T21:32:16,277 DEBUG [Thread-1579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:32:16,277 DEBUG [Thread-1581 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x170d29d0 to 127.0.0.1:57927 2024-12-13T21:32:16,277 DEBUG [Thread-1581 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:32:16,279 DEBUG [Thread-1583 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b61f1c4 to 127.0.0.1:57927 2024-12-13T21:32:16,279 DEBUG [Thread-1583 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:32:16,281 DEBUG [Thread-1575 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0523025d to 127.0.0.1:57927 2024-12-13T21:32:16,281 DEBUG [Thread-1575 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:32:16,282 DEBUG [Thread-1577 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b9a1701 to 127.0.0.1:57927 2024-12-13T21:32:16,282 DEBUG [Thread-1577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:32:16,321 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:16,325 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412130e96ecdac1d049b6afd90af9304bef7c_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412130e96ecdac1d049b6afd90af9304bef7c_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:16,326 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/c0e06b463e504c9b84400fb9896ef869, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:16,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/c0e06b463e504c9b84400fb9896ef869 is 175, key is test_row_0/A:col10/1734125535906/Put/seqid=0 2024-12-13T21:32:16,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742268_1444 (size=39949) 2024-12-13T21:32:16,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-13T21:32:16,372 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,372 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-13T21:32:16,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:16,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:16,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:16,373 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:16,524 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-13T21:32:16,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:16,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:16,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:16,525 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:16,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:16,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125596568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:16,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125596568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,679 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-13T21:32:16,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:16,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:16,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:16,680 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:16,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:16,732 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=345, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/c0e06b463e504c9b84400fb9896ef869 2024-12-13T21:32:16,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/51d911b4750a44e3920b3c57be8272b0 is 50, key is test_row_0/B:col10/1734125535906/Put/seqid=0 2024-12-13T21:32:16,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742269_1445 (size=12301) 2024-12-13T21:32:16,835 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-13T21:32:16,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:16,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:16,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:16,837 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:16,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-13T21:32:16,992 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:16,993 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-13T21:32:16,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:16,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:16,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:16,994 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:16,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:17,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:17,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40824 deadline: 1734125597071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:17,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:17,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40822 deadline: 1734125597075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:17,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/51d911b4750a44e3920b3c57be8272b0 2024-12-13T21:32:17,148 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:17,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-13T21:32:17,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:17,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:17,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:17,150 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:17,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:17,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:17,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/66c9d271592142718de7daddf7eb10bf is 50, key is test_row_0/C:col10/1734125535906/Put/seqid=0 2024-12-13T21:32:17,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742270_1446 (size=12301) 2024-12-13T21:32:17,305 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:17,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-13T21:32:17,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:17,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:17,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:17,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:17,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:17,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:17,354 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T21:32:17,460 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:17,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-13T21:32:17,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:17,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. as already flushing 2024-12-13T21:32:17,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:17,462 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:17,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:17,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:32:17,564 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/66c9d271592142718de7daddf7eb10bf 2024-12-13T21:32:17,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/c0e06b463e504c9b84400fb9896ef869 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/c0e06b463e504c9b84400fb9896ef869 2024-12-13T21:32:17,577 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/c0e06b463e504c9b84400fb9896ef869, entries=200, sequenceid=345, filesize=39.0 K 2024-12-13T21:32:17,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/51d911b4750a44e3920b3c57be8272b0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/51d911b4750a44e3920b3c57be8272b0 2024-12-13T21:32:17,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/51d911b4750a44e3920b3c57be8272b0, entries=150, sequenceid=345, filesize=12.0 K 2024-12-13T21:32:17,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/66c9d271592142718de7daddf7eb10bf as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/66c9d271592142718de7daddf7eb10bf 2024-12-13T21:32:17,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/66c9d271592142718de7daddf7eb10bf, entries=150, sequenceid=345, filesize=12.0 K 2024-12-13T21:32:17,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for be31d870e3b01c14f0b712223355e104 in 1680ms, sequenceid=345, compaction requested=true 2024-12-13T21:32:17,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:17,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:32:17,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:17,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:32:17,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:17,586 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:17,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be31d870e3b01c14f0b712223355e104:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:32:17,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:17,586 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:17,587 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:17,587 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:17,587 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/A is initiating minor compaction (all files) 2024-12-13T21:32:17,587 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/B is initiating minor compaction (all files) 2024-12-13T21:32:17,587 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/A in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:17,587 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/B in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:17,587 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/0fb2d61128984fdfbbae1d48d56ee616, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/58b1bd9e577347d2b1c2c0dae32456b0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/c0e06b463e504c9b84400fb9896ef869] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=100.8 K 2024-12-13T21:32:17,587 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/0112a1e72fb241d496e01c6a126ba257, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/d5243cf7d580417b8f9af2d2f584e31e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/51d911b4750a44e3920b3c57be8272b0] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=36.8 K 2024-12-13T21:32:17,587 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:17,587 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/0fb2d61128984fdfbbae1d48d56ee616, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/58b1bd9e577347d2b1c2c0dae32456b0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/c0e06b463e504c9b84400fb9896ef869] 2024-12-13T21:32:17,587 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 0112a1e72fb241d496e01c6a126ba257, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734125531934 2024-12-13T21:32:17,587 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fb2d61128984fdfbbae1d48d56ee616, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734125531934 2024-12-13T21:32:17,587 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58b1bd9e577347d2b1c2c0dae32456b0, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734125533139 2024-12-13T21:32:17,587 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting d5243cf7d580417b8f9af2d2f584e31e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734125533139 2024-12-13T21:32:17,588 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0e06b463e504c9b84400fb9896ef869, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1734125535290 2024-12-13T21:32:17,588 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 51d911b4750a44e3920b3c57be8272b0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1734125535293 2024-12-13T21:32:17,592 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:17,593 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#B#compaction#368 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:17,593 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412136e35c640ad114636baf83b8b57673fd6_be31d870e3b01c14f0b712223355e104 store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:17,594 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/a8e582fec7d942f5a616dae27064c9d0 is 50, key is test_row_0/B:col10/1734125535906/Put/seqid=0 2024-12-13T21:32:17,596 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412136e35c640ad114636baf83b8b57673fd6_be31d870e3b01c14f0b712223355e104, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:17,597 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412136e35c640ad114636baf83b8b57673fd6_be31d870e3b01c14f0b712223355e104 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:17,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742271_1447 (size=13153) 2024-12-13T21:32:17,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742272_1448 (size=4469) 2024-12-13T21:32:17,616 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:17,617 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-13T21:32:17,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:17,617 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-13T21:32:17,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:17,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:17,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:17,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:17,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:17,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:17,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213453888d6355c42a9af006b606268a36a_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_0/A:col10/1734125535945/Put/seqid=0 2024-12-13T21:32:17,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742273_1449 (size=12454) 2024-12-13T21:32:17,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-13T21:32:18,001 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#A#compaction#369 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:18,002 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/6722090a0b0b4bbea3c63e2e1a9a5156 is 175, key is test_row_0/A:col10/1734125535906/Put/seqid=0 2024-12-13T21:32:18,004 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/a8e582fec7d942f5a616dae27064c9d0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/a8e582fec7d942f5a616dae27064c9d0 2024-12-13T21:32:18,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742274_1450 (size=32107) 2024-12-13T21:32:18,009 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/B of be31d870e3b01c14f0b712223355e104 into a8e582fec7d942f5a616dae27064c9d0(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:32:18,009 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:18,009 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/B, priority=13, startTime=1734125537586; duration=0sec 2024-12-13T21:32:18,009 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:18,009 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:B 2024-12-13T21:32:18,009 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:18,010 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:18,010 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): be31d870e3b01c14f0b712223355e104/C is initiating minor compaction (all files) 2024-12-13T21:32:18,010 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be31d870e3b01c14f0b712223355e104/C in TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:18,010 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/c159bc7ef08b4bdd81a4c06c505e2755, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5a44107a9740429a92dcb4723d20c8f6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/66c9d271592142718de7daddf7eb10bf] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp, totalSize=36.8 K 2024-12-13T21:32:18,011 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting c159bc7ef08b4bdd81a4c06c505e2755, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734125531934 2024-12-13T21:32:18,011 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a44107a9740429a92dcb4723d20c8f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734125533139 2024-12-13T21:32:18,011 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 66c9d271592142718de7daddf7eb10bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1734125535293 2024-12-13T21:32:18,019 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be31d870e3b01c14f0b712223355e104#C#compaction#371 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:18,019 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/06091bac69ca497d8710ac82b34930ab is 50, key is test_row_0/C:col10/1734125535906/Put/seqid=0 2024-12-13T21:32:18,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742275_1451 (size=13153) 2024-12-13T21:32:18,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:18,030 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213453888d6355c42a9af006b606268a36a_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213453888d6355c42a9af006b606268a36a_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:18,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/dfcab78104fe4a02a65a31ba97677ce7, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:18,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/dfcab78104fe4a02a65a31ba97677ce7 is 175, key is test_row_0/A:col10/1734125535945/Put/seqid=0 2024-12-13T21:32:18,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742276_1452 (size=31255) 2024-12-13T21:32:18,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:18,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
as already flushing 2024-12-13T21:32:18,080 DEBUG [Thread-1566 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4fd3f5fc to 127.0.0.1:57927 2024-12-13T21:32:18,080 DEBUG [Thread-1566 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:32:18,084 DEBUG [Thread-1570 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42af2962 to 127.0.0.1:57927 2024-12-13T21:32:18,084 DEBUG [Thread-1570 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:32:18,410 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/6722090a0b0b4bbea3c63e2e1a9a5156 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/6722090a0b0b4bbea3c63e2e1a9a5156 2024-12-13T21:32:18,414 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/A of be31d870e3b01c14f0b712223355e104 into 6722090a0b0b4bbea3c63e2e1a9a5156(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:32:18,414 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:18,414 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/A, priority=13, startTime=1734125537586; duration=0sec 2024-12-13T21:32:18,414 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:18,414 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:A 2024-12-13T21:32:18,427 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/06091bac69ca497d8710ac82b34930ab as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/06091bac69ca497d8710ac82b34930ab 2024-12-13T21:32:18,431 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be31d870e3b01c14f0b712223355e104/C of be31d870e3b01c14f0b712223355e104 into 06091bac69ca497d8710ac82b34930ab(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:32:18,431 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:18,431 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104., storeName=be31d870e3b01c14f0b712223355e104/C, priority=13, startTime=1734125537586; duration=0sec 2024-12-13T21:32:18,431 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:18,431 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be31d870e3b01c14f0b712223355e104:C 2024-12-13T21:32:18,435 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=366, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/dfcab78104fe4a02a65a31ba97677ce7 2024-12-13T21:32:18,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/7edbdb0297c0483aba55d5ad9bde8b81 is 50, key is test_row_0/B:col10/1734125535945/Put/seqid=0 2024-12-13T21:32:18,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742277_1453 (size=12301) 2024-12-13T21:32:18,846 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/7edbdb0297c0483aba55d5ad9bde8b81 2024-12-13T21:32:18,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/5683981c905947c5ab3bfa1ad5d5874e is 50, key is test_row_0/C:col10/1734125535945/Put/seqid=0 2024-12-13T21:32:18,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742278_1454 (size=12301) 2024-12-13T21:32:19,256 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/5683981c905947c5ab3bfa1ad5d5874e 2024-12-13T21:32:19,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/dfcab78104fe4a02a65a31ba97677ce7 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/dfcab78104fe4a02a65a31ba97677ce7 2024-12-13T21:32:19,263 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/dfcab78104fe4a02a65a31ba97677ce7, entries=150, sequenceid=366, filesize=30.5 K 2024-12-13T21:32:19,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/7edbdb0297c0483aba55d5ad9bde8b81 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/7edbdb0297c0483aba55d5ad9bde8b81 2024-12-13T21:32:19,267 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/7edbdb0297c0483aba55d5ad9bde8b81, entries=150, sequenceid=366, filesize=12.0 K 2024-12-13T21:32:19,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/5683981c905947c5ab3bfa1ad5d5874e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5683981c905947c5ab3bfa1ad5d5874e 2024-12-13T21:32:19,271 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5683981c905947c5ab3bfa1ad5d5874e, entries=150, sequenceid=366, filesize=12.0 K 2024-12-13T21:32:19,271 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=13.42 KB/13740 for be31d870e3b01c14f0b712223355e104 in 1654ms, sequenceid=366, compaction requested=false 2024-12-13T21:32:19,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:19,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
2024-12-13T21:32:19,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-13T21:32:19,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-13T21:32:19,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-13T21:32:19,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.5100 sec 2024-12-13T21:32:19,275 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 3.5140 sec 2024-12-13T21:32:19,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-13T21:32:19,868 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-13T21:32:22,098 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/fd052dae32be:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bcffbeac7892468bbb7884db9c37f5d4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/87ccb5d61f0e454e98c3c9c4ddd6b50e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/3b08a57e1c62448dadd3aa9fe8baa183, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/b68a2e43e4f444a6a44fd6bc9ab37707, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fe8bffef6e544a51b7f1c230485767b6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/cefae8128b244ccc991b756bedc18d3c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1df51adabf5743e3b5c7e2ff28ba32ce, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1b286a45307e4df9a07a4bfc98e61dc2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d202d7abd8e46bb928948fee54c02a3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/85b3a6b5427c47e98a801d860f25bf2b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/7b0dbaa2d254490986244c1d530c7b60, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1fcbea5142604d09a0af8adbb2cf484a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d5ebfcf077d4132867204b4ea6f7bcc, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bc6a315af18843eca94ca03841bed379, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fadbea4b9df94fb89d47326529588fbe, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/54f6386cf6384d409c33dc2dd22e1511, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/faa24a2f089a4bdaaaf464c4b18ff131, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/a1adcc55b6214bb2bc47ef622375969a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bfeb1cbec21a488db73c28f75989e229, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/af4e404b7b024a04bc38825f032eec78, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/edf766ed44f24531abf2a2d5d8dc0044, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/78688a92576741b18ff781eb832bb9d6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/0fb2d61128984fdfbbae1d48d56ee616, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/58b1bd9e577347d2b1c2c0dae32456b0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/c0e06b463e504c9b84400fb9896ef869] to archive 2024-12-13T21:32:22,099 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/fd052dae32be:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-13T21:32:22,101 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/b68a2e43e4f444a6a44fd6bc9ab37707 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/b68a2e43e4f444a6a44fd6bc9ab37707 2024-12-13T21:32:22,101 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/cefae8128b244ccc991b756bedc18d3c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/cefae8128b244ccc991b756bedc18d3c 2024-12-13T21:32:22,101 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/87ccb5d61f0e454e98c3c9c4ddd6b50e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/87ccb5d61f0e454e98c3c9c4ddd6b50e 2024-12-13T21:32:22,101 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/3b08a57e1c62448dadd3aa9fe8baa183 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/3b08a57e1c62448dadd3aa9fe8baa183 2024-12-13T21:32:22,101 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bcffbeac7892468bbb7884db9c37f5d4 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bcffbeac7892468bbb7884db9c37f5d4 2024-12-13T21:32:22,101 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1b286a45307e4df9a07a4bfc98e61dc2 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1b286a45307e4df9a07a4bfc98e61dc2 2024-12-13T21:32:22,101 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1df51adabf5743e3b5c7e2ff28ba32ce to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1df51adabf5743e3b5c7e2ff28ba32ce 2024-12-13T21:32:22,101 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fe8bffef6e544a51b7f1c230485767b6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fe8bffef6e544a51b7f1c230485767b6 2024-12-13T21:32:22,103 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1fcbea5142604d09a0af8adbb2cf484a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/1fcbea5142604d09a0af8adbb2cf484a 2024-12-13T21:32:22,103 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/85b3a6b5427c47e98a801d860f25bf2b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/85b3a6b5427c47e98a801d860f25bf2b 2024-12-13T21:32:22,103 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/7b0dbaa2d254490986244c1d530c7b60 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/7b0dbaa2d254490986244c1d530c7b60 2024-12-13T21:32:22,103 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fadbea4b9df94fb89d47326529588fbe to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/fadbea4b9df94fb89d47326529588fbe 2024-12-13T21:32:22,103 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bc6a315af18843eca94ca03841bed379 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bc6a315af18843eca94ca03841bed379 2024-12-13T21:32:22,103 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/54f6386cf6384d409c33dc2dd22e1511 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/54f6386cf6384d409c33dc2dd22e1511 2024-12-13T21:32:22,104 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d202d7abd8e46bb928948fee54c02a3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d202d7abd8e46bb928948fee54c02a3 2024-12-13T21:32:22,104 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d5ebfcf077d4132867204b4ea6f7bcc to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/8d5ebfcf077d4132867204b4ea6f7bcc 2024-12-13T21:32:22,105 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/a1adcc55b6214bb2bc47ef622375969a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/a1adcc55b6214bb2bc47ef622375969a 2024-12-13T21:32:22,105 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/faa24a2f089a4bdaaaf464c4b18ff131 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/faa24a2f089a4bdaaaf464c4b18ff131 2024-12-13T21:32:22,105 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bfeb1cbec21a488db73c28f75989e229 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/bfeb1cbec21a488db73c28f75989e229 2024-12-13T21:32:22,105 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/af4e404b7b024a04bc38825f032eec78 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/af4e404b7b024a04bc38825f032eec78 2024-12-13T21:32:22,105 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/edf766ed44f24531abf2a2d5d8dc0044 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/edf766ed44f24531abf2a2d5d8dc0044 2024-12-13T21:32:22,105 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/78688a92576741b18ff781eb832bb9d6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/78688a92576741b18ff781eb832bb9d6 2024-12-13T21:32:22,105 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/0fb2d61128984fdfbbae1d48d56ee616 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/0fb2d61128984fdfbbae1d48d56ee616 2024-12-13T21:32:22,106 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/58b1bd9e577347d2b1c2c0dae32456b0 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/58b1bd9e577347d2b1c2c0dae32456b0 2024-12-13T21:32:22,106 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/c0e06b463e504c9b84400fb9896ef869 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/c0e06b463e504c9b84400fb9896ef869 2024-12-13T21:32:22,109 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/fd052dae32be:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/9e1cc48b12c74995a67ec9925c8ee179, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/3f2df5caf1de4eedb5f910906c330298, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/20b0c3f22aed4050b530eb3e99209eca, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/add45aebfd4d4d9fb8463811454cc661, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/5eebcddf3ad14f7389d9a4f9a886051c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/b6be3758ebd2429faffab1c54854d167, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/6a943d36b84345f791dce9cdaab8f058, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/d4f03c70775e4d24b4444347ca48888d, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/60cbf8b892d14301a006cf6ca9dd97d7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/fed4fd7ed5ed44648858f1c66c75d140, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/12727f2cb83b47319dc63573277f2cb8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/89bb805083f0447891e919ec18044a98, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/c9552aeb39cd4e08a2423d039c910d69, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/f0f44ba227f24f99ba8656f08b132d74, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/0d06fa05f58147d6a48742512878086a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/9b9fd99238654f009f0e92aaa281a458, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/03e506eee76148e58b827f8bb09e79c3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/96e908ad70ea431ebf007f4181c505c1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/3bc3596e253c49a497a99c793aac4a4c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/b5e47c7d50b14165b71fd09c55759881, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/27ea084cc2894c53b8538b2d07814469, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/0112a1e72fb241d496e01c6a126ba257, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/2f64358106a54eb9b99e502140dfed68, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/d5243cf7d580417b8f9af2d2f584e31e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/51d911b4750a44e3920b3c57be8272b0] to archive 2024-12-13T21:32:22,109 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/fd052dae32be:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-13T21:32:22,112 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/add45aebfd4d4d9fb8463811454cc661 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/add45aebfd4d4d9fb8463811454cc661 2024-12-13T21:32:22,112 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/20b0c3f22aed4050b530eb3e99209eca to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/20b0c3f22aed4050b530eb3e99209eca 2024-12-13T21:32:22,112 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/b6be3758ebd2429faffab1c54854d167 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/b6be3758ebd2429faffab1c54854d167 2024-12-13T21:32:22,112 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/5eebcddf3ad14f7389d9a4f9a886051c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/5eebcddf3ad14f7389d9a4f9a886051c 2024-12-13T21:32:22,112 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/9e1cc48b12c74995a67ec9925c8ee179 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/9e1cc48b12c74995a67ec9925c8ee179 2024-12-13T21:32:22,112 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/3f2df5caf1de4eedb5f910906c330298 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/3f2df5caf1de4eedb5f910906c330298 2024-12-13T21:32:22,113 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/d4f03c70775e4d24b4444347ca48888d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/d4f03c70775e4d24b4444347ca48888d 2024-12-13T21:32:22,113 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/6a943d36b84345f791dce9cdaab8f058 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/6a943d36b84345f791dce9cdaab8f058 2024-12-13T21:32:22,114 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/fed4fd7ed5ed44648858f1c66c75d140 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/fed4fd7ed5ed44648858f1c66c75d140 2024-12-13T21:32:22,114 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/89bb805083f0447891e919ec18044a98 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/89bb805083f0447891e919ec18044a98 2024-12-13T21:32:22,114 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/f0f44ba227f24f99ba8656f08b132d74 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/f0f44ba227f24f99ba8656f08b132d74 2024-12-13T21:32:22,114 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/c9552aeb39cd4e08a2423d039c910d69 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/c9552aeb39cd4e08a2423d039c910d69 2024-12-13T21:32:22,114 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/0d06fa05f58147d6a48742512878086a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/0d06fa05f58147d6a48742512878086a 2024-12-13T21:32:22,114 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/9b9fd99238654f009f0e92aaa281a458 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/9b9fd99238654f009f0e92aaa281a458 2024-12-13T21:32:22,114 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/12727f2cb83b47319dc63573277f2cb8 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/12727f2cb83b47319dc63573277f2cb8 2024-12-13T21:32:22,114 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/60cbf8b892d14301a006cf6ca9dd97d7 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/60cbf8b892d14301a006cf6ca9dd97d7 2024-12-13T21:32:22,115 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/03e506eee76148e58b827f8bb09e79c3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/03e506eee76148e58b827f8bb09e79c3 2024-12-13T21:32:22,116 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/3bc3596e253c49a497a99c793aac4a4c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/3bc3596e253c49a497a99c793aac4a4c 2024-12-13T21:32:22,116 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/96e908ad70ea431ebf007f4181c505c1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/96e908ad70ea431ebf007f4181c505c1 2024-12-13T21:32:22,116 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/0112a1e72fb241d496e01c6a126ba257 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/0112a1e72fb241d496e01c6a126ba257 2024-12-13T21:32:22,116 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/2f64358106a54eb9b99e502140dfed68 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/2f64358106a54eb9b99e502140dfed68 2024-12-13T21:32:22,116 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/b5e47c7d50b14165b71fd09c55759881 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/b5e47c7d50b14165b71fd09c55759881 2024-12-13T21:32:22,116 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/27ea084cc2894c53b8538b2d07814469 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/27ea084cc2894c53b8538b2d07814469 2024-12-13T21:32:22,116 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/d5243cf7d580417b8f9af2d2f584e31e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/d5243cf7d580417b8f9af2d2f584e31e 2024-12-13T21:32:22,117 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/51d911b4750a44e3920b3c57be8272b0 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/51d911b4750a44e3920b3c57be8272b0 2024-12-13T21:32:22,120 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/fd052dae32be:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a468ba7e5d2a46519cd749fb55dc38d0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/90a6fedc5f2b4f56927d6d16d2a38d7e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/6deb2140bcc9436eb2f81ed576163611, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/b208797d89484f97beb2d327932f3f4a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5a6a3b3437754b719c3a8a4d93539a21, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/4e803a4b54a341eeb6e40b20a1b47a61, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/505706b0d93342ae87bc42e8a817198b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a22f91380ec04f1a9a9996094265633e, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/b7b0292b6f874c1096033298e998a2fc, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/380987ae6d6f41838d71de81be9d543e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/903221380b8e40b2888d6359ac72b6cd, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a1cbfd87cf6e491097e4fea5172d9fbd, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/9f2df22a867f44ee8b334e104154113b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/44c01810e0794445a3fee88d7b3f8b12, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/968dc09b281948eda6321b5f1c2bd1f1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a01f669fcfde42e1b84157b8f1bbd444, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a3cebf7b110d4332a824691b8bd73dbd, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/e51092f385514d8abe5faf7be7473c5a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/bfa7eb49533244f78f2f005368fbdba2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/8193c2721bf04d5e8ca62ac71cb602b5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/7d2f80b934ab4473b4b037fe57544037, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/c159bc7ef08b4bdd81a4c06c505e2755, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/f87d8d641b9d40d0b70e3de6d1b45060, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5a44107a9740429a92dcb4723d20c8f6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/66c9d271592142718de7daddf7eb10bf] to archive 2024-12-13T21:32:22,121 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/fd052dae32be:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
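[Editor's note] Every "Archived from FileableStoreFile" move recorded in the surrounding DEBUG lines follows the same path mapping: a store file under .../data/default/<table>/<region>/<family>/ is relocated to the mirrored location under .../archive/data/default/<table>/<region>/<family>/. The following is a minimal, illustrative sketch of that mapping using the plain Hadoop FileSystem API; it is not the HFileArchiver implementation, and the rootDir argument and the helper name archiveStoreFile are assumptions made for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

public class ArchivePathSketch {
  /**
   * Illustrative only: mirror a store file from <rootDir>/data/... to <rootDir>/archive/data/...,
   * the same source-to-destination mapping visible in the HFileArchiver DEBUG lines above.
   * Assumes storeFile lives under <rootDir>/data on the same filesystem.
   */
  static Path archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
    // Relative part, e.g. default/TestAcidGuarantees/<region>/A/<hfile>
    String dataPrefix = new Path(rootDir, "data").toUri().getPath();
    String relative = storeFile.toUri().getPath().substring(dataPrefix.length() + 1);
    Path archived = new Path(new Path(rootDir, "archive/data"), relative);
    fs.mkdirs(archived.getParent());          // ensure the mirrored directory exists
    if (!fs.rename(storeFile, archived)) {    // move, do not copy
      throw new IOException("Failed to archive " + storeFile + " to " + archived);
    }
    return archived;
  }

  public static void main(String[] args) throws IOException {
    // args[0]: root dir (e.g. hdfs://localhost:34065/user/jenkins/test-data/<run-id>)
    // args[1]: a store file under <rootDir>/data/default/<table>/<region>/<family>/
    FileSystem fs = FileSystem.get(new Configuration());
    System.out.println("archived to " + archiveStoreFile(fs, new Path(args[0]), new Path(args[1])));
  }
}

Using rename rather than copy keeps the archive step a cheap metadata operation on HDFS and leaves exactly one copy of each compacted file, which matches the one-line-per-file move pattern seen in the log.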
2024-12-13T21:32:22,123 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/b208797d89484f97beb2d327932f3f4a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/b208797d89484f97beb2d327932f3f4a 2024-12-13T21:32:22,123 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/90a6fedc5f2b4f56927d6d16d2a38d7e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/90a6fedc5f2b4f56927d6d16d2a38d7e 2024-12-13T21:32:22,123 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/6deb2140bcc9436eb2f81ed576163611 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/6deb2140bcc9436eb2f81ed576163611 2024-12-13T21:32:22,123 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a468ba7e5d2a46519cd749fb55dc38d0 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a468ba7e5d2a46519cd749fb55dc38d0 2024-12-13T21:32:22,123 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5a6a3b3437754b719c3a8a4d93539a21 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5a6a3b3437754b719c3a8a4d93539a21 2024-12-13T21:32:22,123 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/4e803a4b54a341eeb6e40b20a1b47a61 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/4e803a4b54a341eeb6e40b20a1b47a61 2024-12-13T21:32:22,123 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/505706b0d93342ae87bc42e8a817198b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/505706b0d93342ae87bc42e8a817198b 2024-12-13T21:32:22,124 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a22f91380ec04f1a9a9996094265633e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a22f91380ec04f1a9a9996094265633e 2024-12-13T21:32:22,125 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/903221380b8e40b2888d6359ac72b6cd to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/903221380b8e40b2888d6359ac72b6cd 2024-12-13T21:32:22,125 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/9f2df22a867f44ee8b334e104154113b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/9f2df22a867f44ee8b334e104154113b 2024-12-13T21:32:22,125 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/968dc09b281948eda6321b5f1c2bd1f1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/968dc09b281948eda6321b5f1c2bd1f1 2024-12-13T21:32:22,125 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/44c01810e0794445a3fee88d7b3f8b12 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/44c01810e0794445a3fee88d7b3f8b12 2024-12-13T21:32:22,125 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a1cbfd87cf6e491097e4fea5172d9fbd to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a1cbfd87cf6e491097e4fea5172d9fbd 2024-12-13T21:32:22,125 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/b7b0292b6f874c1096033298e998a2fc to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/b7b0292b6f874c1096033298e998a2fc 2024-12-13T21:32:22,125 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/380987ae6d6f41838d71de81be9d543e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/380987ae6d6f41838d71de81be9d543e 2024-12-13T21:32:22,126 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a01f669fcfde42e1b84157b8f1bbd444 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a01f669fcfde42e1b84157b8f1bbd444 2024-12-13T21:32:22,126 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a3cebf7b110d4332a824691b8bd73dbd to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/a3cebf7b110d4332a824691b8bd73dbd 2024-12-13T21:32:22,127 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/7d2f80b934ab4473b4b037fe57544037 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/7d2f80b934ab4473b4b037fe57544037 2024-12-13T21:32:22,127 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/e51092f385514d8abe5faf7be7473c5a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/e51092f385514d8abe5faf7be7473c5a 2024-12-13T21:32:22,127 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/bfa7eb49533244f78f2f005368fbdba2 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/bfa7eb49533244f78f2f005368fbdba2 2024-12-13T21:32:22,127 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/8193c2721bf04d5e8ca62ac71cb602b5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/8193c2721bf04d5e8ca62ac71cb602b5 2024-12-13T21:32:22,127 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/f87d8d641b9d40d0b70e3de6d1b45060 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/f87d8d641b9d40d0b70e3de6d1b45060 2024-12-13T21:32:22,127 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/c159bc7ef08b4bdd81a4c06c505e2755 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/c159bc7ef08b4bdd81a4c06c505e2755 2024-12-13T21:32:22,127 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5a44107a9740429a92dcb4723d20c8f6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5a44107a9740429a92dcb4723d20c8f6 2024-12-13T21:32:22,128 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/66c9d271592142718de7daddf7eb10bf to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/66c9d271592142718de7daddf7eb10bf 2024-12-13T21:32:22,580 DEBUG [Thread-1568 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05c97513 to 127.0.0.1:57927 2024-12-13T21:32:22,580 DEBUG [Thread-1568 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:32:22,592 DEBUG [Thread-1572 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5910b8c7 to 127.0.0.1:57927 2024-12-13T21:32:22,592 DEBUG [Thread-1572 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:32:22,617 DEBUG [Thread-1564 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1cbce2b4 to 127.0.0.1:57927 2024-12-13T21:32:22,617 DEBUG [Thread-1564 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 85 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 34 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 92 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2993 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8979 rows 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2967 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8901 rows 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3000 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9000 rows 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2970 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8910 rows 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2962 2024-12-13T21:32:22,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8886 rows 2024-12-13T21:32:22,618 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-13T21:32:22,618 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1acf826f to 127.0.0.1:57927 2024-12-13T21:32:22,618 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:32:22,619 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-13T21:32:22,620 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-12-13T21:32:22,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-13T21:32:22,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-13T21:32:22,624 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125542623"}]},"ts":"1734125542623"} 2024-12-13T21:32:22,624 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-13T21:32:22,644 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-13T21:32:22,645 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-13T21:32:22,646 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=be31d870e3b01c14f0b712223355e104, UNASSIGN}] 2024-12-13T21:32:22,651 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=be31d870e3b01c14f0b712223355e104, UNASSIGN 2024-12-13T21:32:22,654 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=be31d870e3b01c14f0b712223355e104, regionState=CLOSING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:32:22,655 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T21:32:22,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; CloseRegionProcedure be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:32:22,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-13T21:32:22,807 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:22,808 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(124): Close be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:22,808 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T21:32:22,808 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1681): Closing be31d870e3b01c14f0b712223355e104, disabling compactions & flushes 2024-12-13T21:32:22,808 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:22,808 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:22,808 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. after waiting 0 ms 2024-12-13T21:32:22,808 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 
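[Editor's note] The DisableTableProcedure (pid=126) and, later in this section, the DeleteTableProcedure (pid=130) are driven from the client side through the ordinary HBase Admin API. The sketch below shows a minimal client call sequence of that kind; it is not the AcidGuaranteesTestTool's own code, and the ZooKeeper quorum value is an assumption taken from the 127.0.0.1:57927 connections seen earlier in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumption for the example: point at the mini-cluster's ZooKeeper from this log.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:57927");
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table); // master runs a DisableTableProcedure, as with pid=126 above
      }
      admin.deleteTable(table);    // master runs a DeleteTableProcedure, archiving the region files
    }
  }
}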
2024-12-13T21:32:22,808 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(2837): Flushing be31d870e3b01c14f0b712223355e104 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-13T21:32:22,808 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=A 2024-12-13T21:32:22,808 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:22,808 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=B 2024-12-13T21:32:22,808 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:22,808 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be31d870e3b01c14f0b712223355e104, store=C 2024-12-13T21:32:22,808 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:22,814 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213e514d48f28a64106a2c09b23aafa5155_be31d870e3b01c14f0b712223355e104 is 50, key is test_row_1/A:col10/1734125542591/Put/seqid=0 2024-12-13T21:32:22,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742279_1455 (size=9914) 2024-12-13T21:32:22,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-13T21:32:23,219 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:23,225 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213e514d48f28a64106a2c09b23aafa5155_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213e514d48f28a64106a2c09b23aafa5155_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:23,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-13T21:32:23,226 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/aa002f587edf42fc92e4583c6c11bdd1, store: [table=TestAcidGuarantees family=A region=be31d870e3b01c14f0b712223355e104] 2024-12-13T21:32:23,227 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/aa002f587edf42fc92e4583c6c11bdd1 is 175, key is test_row_1/A:col10/1734125542591/Put/seqid=0 2024-12-13T21:32:23,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742280_1456 (size=22561) 2024-12-13T21:32:23,633 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=377, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/aa002f587edf42fc92e4583c6c11bdd1 2024-12-13T21:32:23,640 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/64388fa51ce040e79fd217f20b52e22a is 50, key is test_row_1/B:col10/1734125542591/Put/seqid=0 2024-12-13T21:32:23,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742281_1457 (size=9857) 2024-12-13T21:32:23,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-13T21:32:24,044 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/64388fa51ce040e79fd217f20b52e22a 2024-12-13T21:32:24,051 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/141aafbfca3e421a8241f079fd283f6e is 50, key is test_row_1/C:col10/1734125542591/Put/seqid=0 2024-12-13T21:32:24,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742282_1458 (size=9857) 2024-12-13T21:32:24,460 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/141aafbfca3e421a8241f079fd283f6e 2024-12-13T21:32:24,466 DEBUG 
[RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/A/aa002f587edf42fc92e4583c6c11bdd1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/aa002f587edf42fc92e4583c6c11bdd1 2024-12-13T21:32:24,470 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/aa002f587edf42fc92e4583c6c11bdd1, entries=100, sequenceid=377, filesize=22.0 K 2024-12-13T21:32:24,470 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/B/64388fa51ce040e79fd217f20b52e22a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/64388fa51ce040e79fd217f20b52e22a 2024-12-13T21:32:24,475 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/64388fa51ce040e79fd217f20b52e22a, entries=100, sequenceid=377, filesize=9.6 K 2024-12-13T21:32:24,477 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/.tmp/C/141aafbfca3e421a8241f079fd283f6e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/141aafbfca3e421a8241f079fd283f6e 2024-12-13T21:32:24,481 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/141aafbfca3e421a8241f079fd283f6e, entries=100, sequenceid=377, filesize=9.6 K 2024-12-13T21:32:24,482 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for be31d870e3b01c14f0b712223355e104 in 1674ms, sequenceid=377, compaction requested=true 2024-12-13T21:32:24,488 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/recovered.edits/380.seqid, newMaxSeqId=380, maxSeqId=4 2024-12-13T21:32:24,489 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 
{event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104. 2024-12-13T21:32:24,489 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1635): Region close journal for be31d870e3b01c14f0b712223355e104: 2024-12-13T21:32:24,491 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(170): Closed be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:24,491 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=be31d870e3b01c14f0b712223355e104, regionState=CLOSED 2024-12-13T21:32:24,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-13T21:32:24,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; CloseRegionProcedure be31d870e3b01c14f0b712223355e104, server=fd052dae32be,38989,1734125418878 in 1.8360 sec 2024-12-13T21:32:24,497 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-12-13T21:32:24,497 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=be31d870e3b01c14f0b712223355e104, UNASSIGN in 1.8490 sec 2024-12-13T21:32:24,499 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-13T21:32:24,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8530 sec 2024-12-13T21:32:24,501 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125544501"}]},"ts":"1734125544501"} 2024-12-13T21:32:24,502 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-13T21:32:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-13T21:32:24,754 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-13T21:32:24,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.1350 sec 2024-12-13T21:32:26,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-13T21:32:26,731 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-13T21:32:26,731 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-12-13T21:32:26,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:32:26,732 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; 
DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:32:26,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-13T21:32:26,733 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=130, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:32:26,733 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,735 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/recovered.edits] 2024-12-13T21:32:26,737 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/6722090a0b0b4bbea3c63e2e1a9a5156 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/6722090a0b0b4bbea3c63e2e1a9a5156 2024-12-13T21:32:26,737 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/dfcab78104fe4a02a65a31ba97677ce7 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/dfcab78104fe4a02a65a31ba97677ce7 2024-12-13T21:32:26,737 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/aa002f587edf42fc92e4583c6c11bdd1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/A/aa002f587edf42fc92e4583c6c11bdd1 2024-12-13T21:32:26,739 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/7edbdb0297c0483aba55d5ad9bde8b81 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/7edbdb0297c0483aba55d5ad9bde8b81 2024-12-13T21:32:26,739 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/64388fa51ce040e79fd217f20b52e22a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/64388fa51ce040e79fd217f20b52e22a 2024-12-13T21:32:26,739 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/a8e582fec7d942f5a616dae27064c9d0 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/B/a8e582fec7d942f5a616dae27064c9d0 2024-12-13T21:32:26,741 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/06091bac69ca497d8710ac82b34930ab to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/06091bac69ca497d8710ac82b34930ab 2024-12-13T21:32:26,741 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/141aafbfca3e421a8241f079fd283f6e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/141aafbfca3e421a8241f079fd283f6e 2024-12-13T21:32:26,742 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5683981c905947c5ab3bfa1ad5d5874e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/C/5683981c905947c5ab3bfa1ad5d5874e 2024-12-13T21:32:26,743 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/recovered.edits/380.seqid to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104/recovered.edits/380.seqid 2024-12-13T21:32:26,744 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,744 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-13T21:32:26,744 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-13T21:32:26,745 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-13T21:32:26,751 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412130919f97ad42747cb99832b93b975ee3a_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412130919f97ad42747cb99832b93b975ee3a_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,751 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121301f3a2414c2548fcb4dc7c9f51551d56_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121301f3a2414c2548fcb4dc7c9f51551d56_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,751 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412130cc5908a53f14760aec7d4c418db04f1_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412130cc5908a53f14760aec7d4c418db04f1_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,751 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121314f67d7fcdd9421181317617034b5a94_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121314f67d7fcdd9421181317617034b5a94_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,751 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213231d29d2135b42409cd0fa82e5d3bff0_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213231d29d2135b42409cd0fa82e5d3bff0_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,752 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412130e96ecdac1d049b6afd90af9304bef7c_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412130e96ecdac1d049b6afd90af9304bef7c_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,752 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213154fa6e8f9ef460dbbcb9efd1726cdcd_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213154fa6e8f9ef460dbbcb9efd1726cdcd_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,752 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121332ca8e8341d746fb84ad9cc92660ab36_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121332ca8e8341d746fb84ad9cc92660ab36_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,753 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412133b3086c6a1ea43fc84b457a72f86a706_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412133b3086c6a1ea43fc84b457a72f86a706_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,753 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213499076b1e46843f78b21a3b13bc52708_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213499076b1e46843f78b21a3b13bc52708_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,753 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412134eef209b323a4b2382265e2e01941d0b_be31d870e3b01c14f0b712223355e104 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412134eef209b323a4b2382265e2e01941d0b_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,754 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213453888d6355c42a9af006b606268a36a_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213453888d6355c42a9af006b606268a36a_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,754 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412139364a0a350b04ccca38a7c3ef9ebf93e_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412139364a0a350b04ccca38a7c3ef9ebf93e_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,754 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213a0f52d7540c8474fbd143e7f1ce3ece2_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213a0f52d7540c8474fbd143e7f1ce3ece2_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,754 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213a02cd6fbc25f4f44847b5d9af3661815_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213a02cd6fbc25f4f44847b5d9af3661815_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,755 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213a8b65df167964a61b49f2394c43cfcc5_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213a8b65df167964a61b49f2394c43cfcc5_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,757 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213e514d48f28a64106a2c09b23aafa5155_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213e514d48f28a64106a2c09b23aafa5155_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,757 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213d96116249ffd4ae6b6451207fe71682f_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213d96116249ffd4ae6b6451207fe71682f_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,757 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213ffafd53cdb384195ba769a3244e51e4a_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213ffafd53cdb384195ba769a3244e51e4a_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,757 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213f591f3dea6d9446499bda216a947e272_be31d870e3b01c14f0b712223355e104 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213f591f3dea6d9446499bda216a947e272_be31d870e3b01c14f0b712223355e104 2024-12-13T21:32:26,758 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-13T21:32:26,760 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=130, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:32:26,762 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-13T21:32:26,763 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-13T21:32:26,765 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=130, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:32:26,765 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
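The entries above trace the server side of a table drop: DisableTableProcedure (pid=126) unassigns the region, then DeleteTableProcedure (pid=130) archives the region and MOB HFiles and cleans hbase:meta. Below is a minimal client-side sketch of the Admin calls that drive this, assuming an HBase 2.x cluster whose hbase-site.xml is on the classpath; it is illustrative only, not the test's own teardown code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();      // picks up hbase-site.xml
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);   // server runs DisableTableProcedure (UNASSIGN of regions)
        }
        admin.deleteTable(tn);      // server runs DeleteTableProcedure (archive HFiles, clean meta)
      }
    }
  }
}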
2024-12-13T21:32:26,765 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734125546765"}]},"ts":"9223372036854775807"} 2024-12-13T21:32:26,766 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-13T21:32:26,766 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => be31d870e3b01c14f0b712223355e104, NAME => 'TestAcidGuarantees,,1734125513050.be31d870e3b01c14f0b712223355e104.', STARTKEY => '', ENDKEY => ''}] 2024-12-13T21:32:26,766 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-13T21:32:26,766 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734125546766"}]},"ts":"9223372036854775807"} 2024-12-13T21:32:26,767 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-13T21:32:26,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-13T21:32:27,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-13T21:32:27,220 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=130, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:32:27,221 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 489 msec 2024-12-13T21:32:27,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-13T21:32:27,337 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-13T21:32:27,348 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=244 (was 247), OpenFileDescriptor=450 (was 461), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=439 (was 444), ProcessCount=11 (was 12), AvailableMemoryMB=1287 (was 3607) 2024-12-13T21:32:27,359 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=244, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=439, ProcessCount=11, AvailableMemoryMB=1285 2024-12-13T21:32:27,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
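The TableDescriptorChecker warning above fires because the table declares a 131072-byte (128 KB) memstore flush size, far below the usual 128 MB default; the "Over memstore limit=512.0 K" rejections near the end of this section are consistent with that flush size times the default hbase.hregion.memstore.block.multiplier of 4. A hedged sketch of how such a per-table flush size would be declared with the HBase 2.x TableDescriptorBuilder API (hypothetical, not the test's own helper):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushSizeDescriptor {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // 128 KB per-table flush size: triggers the TableDescriptorChecker warning and,
        // with the default block multiplier of 4, puts the blocking limit near 512 KB.
        .setMemStoreFlushSize(131072L)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
        .build();
  }
}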
2024-12-13T21:32:27,361 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T21:32:27,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-13T21:32:27,363 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T21:32:27,363 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:27,363 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 131 2024-12-13T21:32:27,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-13T21:32:27,363 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T21:32:27,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742283_1459 (size=963) 2024-12-13T21:32:27,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-13T21:32:27,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-13T21:32:27,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-13T21:32:28,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-13T21:32:28,564 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-13T21:32:29,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-13T21:32:30,013 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-13T21:32:31,151 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05 2024-12-13T21:32:31,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-13T21:32:35,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-13T21:32:36,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742284_1460 (size=53) 2024-12-13T21:32:36,678 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:32:36,679 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 836c7ebcedb8aba974e5bf30d5802cfc, disabling compactions & flushes 2024-12-13T21:32:36,679 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:36,679 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:36,679 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
after waiting 0 ms 2024-12-13T21:32:36,679 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:36,679 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:36,679 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:32:36,680 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T21:32:36,680 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734125556680"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734125556680"}]},"ts":"1734125556680"} 2024-12-13T21:32:36,682 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-13T21:32:36,683 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T21:32:36,683 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125556683"}]},"ts":"1734125556683"} 2024-12-13T21:32:36,685 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-13T21:32:41,925 WARN [WorkerMonitor {}] procedure2.ProcedureExecutor$WorkerMonitor(2221): Worker stuck PEWorker-2(pid=131), run time 14.5620 sec 2024-12-13T21:32:45,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-13T21:32:45,610 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=836c7ebcedb8aba974e5bf30d5802cfc, ASSIGN}] 2024-12-13T21:32:45,611 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=836c7ebcedb8aba974e5bf30d5802cfc, ASSIGN 2024-12-13T21:32:45,612 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=836c7ebcedb8aba974e5bf30d5802cfc, ASSIGN; state=OFFLINE, location=fd052dae32be,38989,1734125418878; forceNewPlan=false, retain=false 2024-12-13T21:32:45,762 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=836c7ebcedb8aba974e5bf30d5802cfc, regionState=OPENING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:32:45,764 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; OpenRegionProcedure 
836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:32:45,917 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:45,924 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:45,924 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(7285): Opening region: {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} 2024-12-13T21:32:45,925 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:45,925 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:32:45,925 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(7327): checking encryption for 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:45,925 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(7330): checking classloading for 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:45,927 INFO [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:45,928 INFO [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:32:45,928 INFO [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 836c7ebcedb8aba974e5bf30d5802cfc columnFamilyName A 2024-12-13T21:32:45,928 DEBUG [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:45,929 INFO [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] regionserver.HStore(327): 
Store=836c7ebcedb8aba974e5bf30d5802cfc/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:32:45,929 INFO [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:45,930 INFO [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:32:45,930 INFO [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 836c7ebcedb8aba974e5bf30d5802cfc columnFamilyName B 2024-12-13T21:32:45,930 DEBUG [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:45,931 INFO [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] regionserver.HStore(327): Store=836c7ebcedb8aba974e5bf30d5802cfc/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:32:45,931 INFO [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:45,932 INFO [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:32:45,932 INFO [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 836c7ebcedb8aba974e5bf30d5802cfc 
columnFamilyName C 2024-12-13T21:32:45,932 DEBUG [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:32:45,933 INFO [StoreOpener-836c7ebcedb8aba974e5bf30d5802cfc-1 {}] regionserver.HStore(327): Store=836c7ebcedb8aba974e5bf30d5802cfc/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:32:45,933 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:45,933 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:45,934 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:45,935 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-13T21:32:45,936 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1085): writing seq id for 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:45,939 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T21:32:45,939 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1102): Opened 836c7ebcedb8aba974e5bf30d5802cfc; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59866586, jitterRate=-0.10791835188865662}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T21:32:45,939 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1001): Region open journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:32:45,940 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., pid=133, masterSystemTime=1734125565917 2024-12-13T21:32:45,941 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
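The store-opening entries above show families A, B and C each backed by a CompactingMemStore with compactor=ADAPTIVE, matching the 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' table attribute in the create statement. A minimal sketch of creating an equivalent table through the HBase 2.x client API follows (illustrative; the test's own setup helper is not shown in this log):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level attribute seen in the create statement above.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)   // VERSIONS => '1' in the descriptor above
                .build());
      }
      admin.createTable(table.build());   // blocks until the CreateTableProcedure completes
    }
  }
}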
2024-12-13T21:32:45,941 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:45,942 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=836c7ebcedb8aba974e5bf30d5802cfc, regionState=OPEN, openSeqNum=2, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:32:45,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-13T21:32:45,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; OpenRegionProcedure 836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 in 178 msec 2024-12-13T21:32:45,946 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=132, resume processing ppid=131 2024-12-13T21:32:45,946 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=131, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=836c7ebcedb8aba974e5bf30d5802cfc, ASSIGN in 334 msec 2024-12-13T21:32:45,946 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T21:32:45,946 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125565946"}]},"ts":"1734125565946"} 2024-12-13T21:32:45,947 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-13T21:32:46,349 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T21:32:46,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 18.9880 sec 2024-12-13T21:32:46,688 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-13T21:32:47,355 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
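The lines that follow show the test opening a batch of client connections, requesting a table flush (pid=134), and immediately hitting RegionTooBusyException ("Over memstore limit=512.0 K") as concurrent writers outrun the deliberately small memstore. A hedged sketch of the kind of multi-family write that creates this pressure (hypothetical row key and value; RegionTooBusyException is retryable, so the client normally retries it with backoff rather than surfacing it to the caller):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicityWriterSketch {
  public static void main(String[] args) throws Exception {
    byte[][] families = {Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C")};
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));        // hypothetical row key
      for (byte[] family : families) {
        // One cell per family, so a reader should observe all three change together.
        put.addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("value"));
      }
      // If the region's memstore exceeds its blocking limit (512 KB in this run), the server
      // answers with RegionTooBusyException and the client retries the mutation with backoff.
      table.put(put);
    }
  }
}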
2024-12-13T21:32:48,564 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-13T21:32:48,564 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-13T21:32:51,440 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-13T21:32:55,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-12-13T21:32:55,479 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 131 completed 2024-12-13T21:32:55,480 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x315a23ef to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65e17c26 2024-12-13T21:32:55,523 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f3ee89e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:32:55,525 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:32:55,526 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36308, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:32:55,527 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-13T21:32:55,527 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56974, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-13T21:32:55,529 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d125972 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53fc02ba 2024-12-13T21:32:55,540 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b0e6a43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:32:55,541 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x134bfe32 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2011d733 2024-12-13T21:32:55,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8e5fd00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:32:55,553 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x17b55f2f to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39b3baa5 2024-12-13T21:32:55,565 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e195d6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:32:55,566 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x646ca555 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@402e5def 2024-12-13T21:32:55,577 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14088aa9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:32:55,578 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x224e54da to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10bda459 2024-12-13T21:32:55,590 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40302925, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:32:55,591 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0657e1bf to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47ef9951 2024-12-13T21:32:55,602 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@784d683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:32:55,603 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6dee2855 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@567011a8 2024-12-13T21:32:55,615 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7761f52b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:32:55,615 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x54e8a98a to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2430fee 2024-12-13T21:32:55,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a736a20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:32:55,628 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x677030bd to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d567fc2 2024-12-13T21:32:55,636 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c153822, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:32:55,636 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x27861032 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69e0c36f 2024-12-13T21:32:55,644 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c813259, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:32:55,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:32:55,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-13T21:32:55,648 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:32:55,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-13T21:32:55,648 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:32:55,648 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:32:55,650 DEBUG [hconnection-0x616ad240-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:32:55,651 DEBUG [hconnection-0x58fc211d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:32:55,651 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36324, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:32:55,651 DEBUG [hconnection-0x198cc4bc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:32:55,651 DEBUG [hconnection-0x4b0820b9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:32:55,651 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.3:36328, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:32:55,651 DEBUG [hconnection-0x6d83373d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:32:55,652 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36332, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:32:55,652 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36338, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:32:55,652 DEBUG [hconnection-0x2b775ec8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:32:55,652 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36340, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:32:55,653 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36342, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:32:55,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:55,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:32:55,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:32:55,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:55,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:32:55,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:55,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:32:55,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:55,659 DEBUG [hconnection-0x138d91de-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:32:55,660 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36352, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:32:55,664 DEBUG [hconnection-0x4f8caa82-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:32:55,665 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36366, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:32:55,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125635666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125635666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125635666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125635667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125635667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,671 DEBUG [hconnection-0x30e66db6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:32:55,671 DEBUG [hconnection-0x24748f9e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:32:55,672 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36382, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:32:55,672 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36394, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:32:55,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/db64a71babba46be9c4a1700ed85ed28 is 50, key is test_row_0/A:col10/1734125575655/Put/seqid=0 2024-12-13T21:32:55,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742285_1461 (size=12001) 2024-12-13T21:32:55,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-13T21:32:55,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125635767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125635767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125635767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125635768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125635768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,799 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-13T21:32:55,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:32:55,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:32:55,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:55,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:55,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
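Editor's note: the entries above trace a client-requested flush of TestAcidGuarantees: the master stores a FlushTableProcedure (pid=134), which spawns a FlushRegionProcedure (pid=135) dispatched to the region server hosting 836c7ebcedb8aba974e5bf30d5802cfc, while the repeated "Checking to see if procedure is done pid=134" lines correspond to the caller polling for completion. Purely as an illustration (not the test's actual code), such a flush request could be issued through the standard Admin API; only the table name is taken from the log, everything else is assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master runs this as a
      // FlushTableProcedure with per-region subprocedures, as seen in the log (pid=134/135).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}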
2024-12-13T21:32:55,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:55,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-13T21:32:55,951 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-13T21:32:55,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:32:55,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:32:55,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:55,952 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:55,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
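Editor's note: the recurring RegionTooBusyException warnings ("Over memstore limit=512.0 K") come from HRegion.checkResources, which rejects writes once a region's memstore passes a blocking threshold; in stock HBase that threshold is derived from the region flush size and hbase.hregion.memstore.block.multiplier. The 512 K limit suggests the test deliberately shrinks the flush size so flushes and blocked writes happen quickly. The values below are assumptions chosen only to illustrate the relationship (128 KB x 4 would match the 512 K limit in the log); the property names are standard:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfigSketch {
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches ~128 KB (the default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes with RegionTooBusyException once the memstore reaches
    // flush.size * multiplier; with the assumed values that is 512 KB.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}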
2024-12-13T21:32:55,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:55,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125635969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125635970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125635970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125635970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:55,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:55,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125635970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/db64a71babba46be9c4a1700ed85ed28 2024-12-13T21:32:56,104 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-13T21:32:56,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:56,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:32:56,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
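Editor's note: while the flush drains the memstore (the "Flushed memstore data size=22.36 KB at sequenceid=14" entry above is the store A snapshot being written under the region's .tmp directory), writers keep receiving RegionTooBusyException. The HBase client normally retries such calls on its own (governed by settings such as hbase.client.retries.number and hbase.client.pause), and depending on those settings the exception may reach the application wrapped in a retries-exhausted error rather than directly. A minimal application-level fallback, assuming the exception surfaces unwrapped and with the retry budget and backoff entirely made up, might look like:

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BackoffPutSketch {
  /** Retries a Put a few times with a linear backoff when the region is blocked. */
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    int attempts = 5;                       // assumed retry budget
    for (int i = 0; i < attempts; i++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {  // memstore over its blocking limit, as in the log
        Thread.sleep(200L * (i + 1));       // assumed backoff; give the flush time to complete
      }
    }
    throw new IOException("region still too busy after " + attempts + " attempts");
  }
}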
2024-12-13T21:32:56,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/7ee33d515e534470b484338305a9c66d is 50, key is test_row_0/B:col10/1734125575655/Put/seqid=0 2024-12-13T21:32:56,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742286_1462 (size=12001) 2024-12-13T21:32:56,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-13T21:32:56,256 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-13T21:32:56,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:56,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:32:56,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
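Editor's note: the HFileWriterImpl entries above show the flush producing one file per column family under the region's .tmp directory (A/db64a71..., then B/7ee33d51...), with the largest cells keyed like test_row_0/A:col10 and test_row_0/B:col10. That is consistent with the test's writers putting the same rows across families A, B and C. Purely to illustrate that key shape (the value payload and method name are assumed), such a put would be built like this:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidRowPutSketch {
  /** Writes one value into families A, B and C of a single row, mirroring the keys seen in the flush. */
  static void writeRow(Connection conn, byte[] value) throws java.io.IOException {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));           // row key taken from the log
      for (String family : new String[] {"A", "B", "C"}) {      // the three families being flushed
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
      }
      table.put(put);
    }
  }
}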
2024-12-13T21:32:56,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:56,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125636271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:56,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125636272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:56,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125636273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:56,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125636274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:56,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125636275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,409 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-13T21:32:56,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:56,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:32:56,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:56,410 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/7ee33d515e534470b484338305a9c66d 2024-12-13T21:32:56,561 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-13T21:32:56,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:56,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:32:56,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:32:56,562 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/3fffd23dd442433cb394309394862c9b is 50, key is test_row_0/C:col10/1734125575655/Put/seqid=0 2024-12-13T21:32:56,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742287_1463 (size=12001) 2024-12-13T21:32:56,713 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-13T21:32:56,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:56,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:32:56,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:32:56,714 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-13T21:32:56,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:56,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125636775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:56,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125636777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:56,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125636778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:56,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125636779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:56,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125636780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,866 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:56,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-13T21:32:56,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:56,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:32:56,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:56,866 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:56,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/3fffd23dd442433cb394309394862c9b 2024-12-13T21:32:56,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/db64a71babba46be9c4a1700ed85ed28 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/db64a71babba46be9c4a1700ed85ed28 2024-12-13T21:32:56,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/db64a71babba46be9c4a1700ed85ed28, entries=150, sequenceid=14, filesize=11.7 K 2024-12-13T21:32:56,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/7ee33d515e534470b484338305a9c66d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/7ee33d515e534470b484338305a9c66d 2024-12-13T21:32:56,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/7ee33d515e534470b484338305a9c66d, entries=150, sequenceid=14, 
filesize=11.7 K 2024-12-13T21:32:56,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/3fffd23dd442433cb394309394862c9b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/3fffd23dd442433cb394309394862c9b 2024-12-13T21:32:57,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/3fffd23dd442433cb394309394862c9b, entries=150, sequenceid=14, filesize=11.7 K 2024-12-13T21:32:57,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 836c7ebcedb8aba974e5bf30d5802cfc in 1345ms, sequenceid=14, compaction requested=false 2024-12-13T21:32:57,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:32:57,018 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:57,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-13T21:32:57,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:32:57,019 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-13T21:32:57,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:32:57,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:57,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:32:57,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:57,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:32:57,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:57,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/90b27a70c6674030b0cc97680605b607 is 50, key is test_row_0/A:col10/1734125575666/Put/seqid=0 2024-12-13T21:32:57,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742288_1464 (size=12001) 2024-12-13T21:32:57,457 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/90b27a70c6674030b0cc97680605b607 2024-12-13T21:32:57,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/0c9f9b152522496ab54bd8f3994e8c6d is 50, key is test_row_0/B:col10/1734125575666/Put/seqid=0 2024-12-13T21:32:57,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742289_1465 (size=12001) 2024-12-13T21:32:57,467 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/0c9f9b152522496ab54bd8f3994e8c6d 2024-12-13T21:32:57,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/bb588b7c77aa40f381eac5b6e5bb411e is 50, key is test_row_0/C:col10/1734125575666/Put/seqid=0 2024-12-13T21:32:57,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742290_1466 (size=12001) 2024-12-13T21:32:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-13T21:32:57,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:32:57,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:57,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:57,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125637792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:57,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:57,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125637792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:57,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:57,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125637793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:57,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:57,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125637793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:57,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:57,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125637794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:57,882 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/bb588b7c77aa40f381eac5b6e5bb411e 2024-12-13T21:32:57,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/90b27a70c6674030b0cc97680605b607 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/90b27a70c6674030b0cc97680605b607 2024-12-13T21:32:57,888 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/90b27a70c6674030b0cc97680605b607, entries=150, sequenceid=38, filesize=11.7 K 2024-12-13T21:32:57,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/0c9f9b152522496ab54bd8f3994e8c6d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/0c9f9b152522496ab54bd8f3994e8c6d 2024-12-13T21:32:57,891 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/0c9f9b152522496ab54bd8f3994e8c6d, entries=150, sequenceid=38, filesize=11.7 K 2024-12-13T21:32:57,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/bb588b7c77aa40f381eac5b6e5bb411e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bb588b7c77aa40f381eac5b6e5bb411e 2024-12-13T21:32:57,895 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bb588b7c77aa40f381eac5b6e5bb411e, entries=150, sequenceid=38, filesize=11.7 K 2024-12-13T21:32:57,896 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 836c7ebcedb8aba974e5bf30d5802cfc in 878ms, sequenceid=38, compaction requested=false 2024-12-13T21:32:57,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:32:57,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:32:57,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-13T21:32:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-13T21:32:57,899 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-13T21:32:57,899 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2490 sec 2024-12-13T21:32:57,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:57,900 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-13T21:32:57,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:32:57,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:57,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:32:57,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:57,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:32:57,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:57,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 2.2520 sec 2024-12-13T21:32:57,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/edd6b3b30bfd4622bedc218d34e9b9cd is 50, key is test_row_0/A:col10/1734125577898/Put/seqid=0 2024-12-13T21:32:57,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742291_1467 (size=14341) 2024-12-13T21:32:57,910 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/edd6b3b30bfd4622bedc218d34e9b9cd 2024-12-13T21:32:57,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/74c1c4fe6ccd437faa77cd6340ac22dc is 50, key is test_row_0/B:col10/1734125577898/Put/seqid=0 2024-12-13T21:32:57,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to 
blk_1073742292_1468 (size=12001) 2024-12-13T21:32:57,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:57,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125637926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:57,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:57,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125637926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:57,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:57,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125637927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:57,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:57,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125637929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:57,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:57,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125637930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125638032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125638032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125638033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125638037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125638038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125638236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125638236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125638236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125638242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125638243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/74c1c4fe6ccd437faa77cd6340ac22dc 2024-12-13T21:32:58,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/9f7b5b77720a44f5b4f38889869e412c is 50, key is test_row_0/C:col10/1734125577898/Put/seqid=0 2024-12-13T21:32:58,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742293_1469 (size=12001) 2024-12-13T21:32:58,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/9f7b5b77720a44f5b4f38889869e412c 2024-12-13T21:32:58,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/edd6b3b30bfd4622bedc218d34e9b9cd as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/edd6b3b30bfd4622bedc218d34e9b9cd 2024-12-13T21:32:58,334 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/edd6b3b30bfd4622bedc218d34e9b9cd, entries=200, sequenceid=51, filesize=14.0 K 2024-12-13T21:32:58,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/74c1c4fe6ccd437faa77cd6340ac22dc as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/74c1c4fe6ccd437faa77cd6340ac22dc 2024-12-13T21:32:58,337 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/74c1c4fe6ccd437faa77cd6340ac22dc, entries=150, sequenceid=51, filesize=11.7 K 2024-12-13T21:32:58,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/9f7b5b77720a44f5b4f38889869e412c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9f7b5b77720a44f5b4f38889869e412c 2024-12-13T21:32:58,340 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9f7b5b77720a44f5b4f38889869e412c, entries=150, sequenceid=51, filesize=11.7 K 2024-12-13T21:32:58,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 836c7ebcedb8aba974e5bf30d5802cfc in 441ms, sequenceid=51, compaction requested=true 2024-12-13T21:32:58,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:32:58,341 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:58,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:32:58,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:58,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:32:58,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:58,342 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:58,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:32:58,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:58,343 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:58,343 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): 
Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:58,343 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/B is initiating minor compaction (all files) 2024-12-13T21:32:58,343 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/A is initiating minor compaction (all files) 2024-12-13T21:32:58,343 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/B in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:58,343 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/A in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:58,343 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/db64a71babba46be9c4a1700ed85ed28, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/90b27a70c6674030b0cc97680605b607, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/edd6b3b30bfd4622bedc218d34e9b9cd] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=37.4 K 2024-12-13T21:32:58,343 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/7ee33d515e534470b484338305a9c66d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/0c9f9b152522496ab54bd8f3994e8c6d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/74c1c4fe6ccd437faa77cd6340ac22dc] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=35.2 K 2024-12-13T21:32:58,343 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ee33d515e534470b484338305a9c66d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734125575655 2024-12-13T21:32:58,343 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting db64a71babba46be9c4a1700ed85ed28, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734125575655 2024-12-13T21:32:58,343 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c9f9b152522496ab54bd8f3994e8c6d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734125575665 2024-12-13T21:32:58,343 DEBUG 
[RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90b27a70c6674030b0cc97680605b607, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734125575665 2024-12-13T21:32:58,343 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting edd6b3b30bfd4622bedc218d34e9b9cd, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125577792 2024-12-13T21:32:58,344 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 74c1c4fe6ccd437faa77cd6340ac22dc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125577792 2024-12-13T21:32:58,353 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#A#compaction#386 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:58,354 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/8106d07a6b9546f7b999170fbaa1503d is 50, key is test_row_0/A:col10/1734125577898/Put/seqid=0 2024-12-13T21:32:58,355 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#B#compaction#387 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:58,356 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/1dfe24aa2ccb4c3ab4c410888355acd9 is 50, key is test_row_0/B:col10/1734125577898/Put/seqid=0 2024-12-13T21:32:58,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742294_1470 (size=12104) 2024-12-13T21:32:58,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742295_1471 (size=12104) 2024-12-13T21:32:58,369 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/8106d07a6b9546f7b999170fbaa1503d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/8106d07a6b9546f7b999170fbaa1503d 2024-12-13T21:32:58,376 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/A of 836c7ebcedb8aba974e5bf30d5802cfc into 8106d07a6b9546f7b999170fbaa1503d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:32:58,377 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:32:58,377 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/A, priority=13, startTime=1734125578341; duration=0sec 2024-12-13T21:32:58,377 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:32:58,377 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:A 2024-12-13T21:32:58,377 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:32:58,378 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:32:58,379 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/C is initiating minor compaction (all files) 2024-12-13T21:32:58,379 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/C in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:58,379 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/3fffd23dd442433cb394309394862c9b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bb588b7c77aa40f381eac5b6e5bb411e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9f7b5b77720a44f5b4f38889869e412c] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=35.2 K 2024-12-13T21:32:58,379 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fffd23dd442433cb394309394862c9b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734125575655 2024-12-13T21:32:58,379 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb588b7c77aa40f381eac5b6e5bb411e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734125575665 2024-12-13T21:32:58,380 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f7b5b77720a44f5b4f38889869e412c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125577792 2024-12-13T21:32:58,386 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#C#compaction#388 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:32:58,387 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/349b2ed789e04ff58269e1d60eb5199f is 50, key is test_row_0/C:col10/1734125577898/Put/seqid=0 2024-12-13T21:32:58,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742296_1472 (size=12104) 2024-12-13T21:32:58,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:58,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-13T21:32:58,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:32:58,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:58,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:32:58,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:58,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:32:58,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:58,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/3d12c6ea22d94e3489194d1749f00fa4 is 50, key is test_row_0/A:col10/1734125577926/Put/seqid=0 2024-12-13T21:32:58,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742297_1473 (size=14341) 2024-12-13T21:32:58,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/3d12c6ea22d94e3489194d1749f00fa4 2024-12-13T21:32:58,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125638554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125638554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125638555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/d1f9fe7855bc4784b3a67fb7a250cfd8 is 50, key is test_row_0/B:col10/1734125577926/Put/seqid=0 2024-12-13T21:32:58,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125638556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125638558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742298_1474 (size=12001) 2024-12-13T21:32:58,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125638659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125638663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125638663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125638664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125638664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,769 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/1dfe24aa2ccb4c3ab4c410888355acd9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/1dfe24aa2ccb4c3ab4c410888355acd9 2024-12-13T21:32:58,774 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/B of 836c7ebcedb8aba974e5bf30d5802cfc into 1dfe24aa2ccb4c3ab4c410888355acd9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:32:58,774 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:32:58,774 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/B, priority=13, startTime=1734125578341; duration=0sec 2024-12-13T21:32:58,774 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:58,774 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:B 2024-12-13T21:32:58,795 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/349b2ed789e04ff58269e1d60eb5199f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/349b2ed789e04ff58269e1d60eb5199f 2024-12-13T21:32:58,799 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/C of 836c7ebcedb8aba974e5bf30d5802cfc into 349b2ed789e04ff58269e1d60eb5199f(size=11.8 K), total size for store is 11.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:32:58,799 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:32:58,799 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/C, priority=13, startTime=1734125578342; duration=0sec 2024-12-13T21:32:58,799 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:32:58,799 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:C 2024-12-13T21:32:58,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125638864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125638867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125638867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125638868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:58,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125638868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:58,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/d1f9fe7855bc4784b3a67fb7a250cfd8 2024-12-13T21:32:58,973 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/436c499911164dc981f11efc2798f594 is 50, key is test_row_0/C:col10/1734125577926/Put/seqid=0 2024-12-13T21:32:58,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742299_1475 (size=12001) 2024-12-13T21:32:58,977 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/436c499911164dc981f11efc2798f594 2024-12-13T21:32:58,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/3d12c6ea22d94e3489194d1749f00fa4 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/3d12c6ea22d94e3489194d1749f00fa4 2024-12-13T21:32:58,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/3d12c6ea22d94e3489194d1749f00fa4, entries=200, sequenceid=76, filesize=14.0 K 2024-12-13T21:32:58,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/d1f9fe7855bc4784b3a67fb7a250cfd8 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/d1f9fe7855bc4784b3a67fb7a250cfd8 2024-12-13T21:32:58,992 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/d1f9fe7855bc4784b3a67fb7a250cfd8, entries=150, sequenceid=76, filesize=11.7 K 2024-12-13T21:32:58,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/436c499911164dc981f11efc2798f594 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/436c499911164dc981f11efc2798f594 2024-12-13T21:32:58,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/436c499911164dc981f11efc2798f594, entries=150, sequenceid=76, filesize=11.7 K 2024-12-13T21:32:58,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 836c7ebcedb8aba974e5bf30d5802cfc in 452ms, sequenceid=76, compaction requested=false 2024-12-13T21:32:58,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:32:59,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:32:59,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-13T21:32:59,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:32:59,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:59,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:32:59,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:59,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:32:59,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:32:59,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/bbd8030bd05343b58580af341e9bccb4 is 50, key is test_row_0/A:col10/1734125579171/Put/seqid=0 2024-12-13T21:32:59,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742300_1476 (size=14341) 2024-12-13T21:32:59,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/bbd8030bd05343b58580af341e9bccb4 2024-12-13T21:32:59,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/edfee14373394051a15ec25b13baf36f is 50, key is test_row_0/B:col10/1734125579171/Put/seqid=0 2024-12-13T21:32:59,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742301_1477 (size=12001) 2024-12-13T21:32:59,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125639202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125639203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125639203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125639205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125639206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125639310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125639310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125639311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125639311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125639313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125639517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125639517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125639518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125639518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125639518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/edfee14373394051a15ec25b13baf36f 2024-12-13T21:32:59,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/629f40bd07f64df3b58f5199325e1cda is 50, key is test_row_0/C:col10/1734125579171/Put/seqid=0 2024-12-13T21:32:59,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742302_1478 (size=12001) 2024-12-13T21:32:59,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-13T21:32:59,752 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-13T21:32:59,753 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:32:59,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-13T21:32:59,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-13T21:32:59,754 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:32:59,754 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:32:59,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
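The repeated RegionTooBusyException entries above are HRegion.checkResources rejecting writes because the region's memstore has passed its blocking limit (reported as 512.0 K); the handlers refuse new Mutate calls until the in-flight flush drains the memstore. As a rough illustration only (the values this test actually configures are not visible in the log), the blocking limit is the flush size multiplied by the block multiplier, so a 512 K limit could come from a combination like the following sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    // Hypothetical values, not taken from this test run.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // assumed 128 K flush threshold
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // blocking limit = flush size * multiplier

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    // 128 * 1024 * 4 = 524288 bytes = 512.0 K, matching the "Over memstore limit=512.0 K" messages.
    System.out.println("blocking memstore limit = " + (flushSize * multiplier) + " bytes");
  }
}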
2024-12-13T21:32:59,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125639822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125639822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125639822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125639823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:32:59,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125639823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-13T21:32:59,906 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:32:59,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-13T21:32:59,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:59,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:32:59,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:32:59,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
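The ERROR above is the master-driven FlushRegionCallable (pid=137) giving up because the MemStoreFlusher is still writing out the previous flush for this region ("NOT flushing ... as already flushing"); the failure is reported back to the master, and later entries show pid=137 being dispatched to the region server again once the first flush has finished. The flush itself is requested through the client Admin API; a minimal sketch, with only the table name taken from the log, could look like this:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a flush procedure, as in the
      // "Client=jenkins//172.17.0.3 flush TestAcidGuarantees" entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}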
2024-12-13T21:32:59,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:32:59,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:00,017 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/629f40bd07f64df3b58f5199325e1cda 2024-12-13T21:33:00,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/bbd8030bd05343b58580af341e9bccb4 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/bbd8030bd05343b58580af341e9bccb4 2024-12-13T21:33:00,022 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/bbd8030bd05343b58580af341e9bccb4, entries=200, sequenceid=91, filesize=14.0 K 2024-12-13T21:33:00,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/edfee14373394051a15ec25b13baf36f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/edfee14373394051a15ec25b13baf36f 2024-12-13T21:33:00,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/edfee14373394051a15ec25b13baf36f, entries=150, sequenceid=91, filesize=11.7 K 2024-12-13T21:33:00,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/629f40bd07f64df3b58f5199325e1cda as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/629f40bd07f64df3b58f5199325e1cda 2024-12-13T21:33:00,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/629f40bd07f64df3b58f5199325e1cda, entries=150, sequenceid=91, filesize=11.7 K 2024-12-13T21:33:00,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 836c7ebcedb8aba974e5bf30d5802cfc in 857ms, sequenceid=91, compaction requested=true 2024-12-13T21:33:00,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:00,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:A, 
priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:00,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:00,029 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:00,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:00,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:00,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:00,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-13T21:33:00,030 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:00,030 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40786 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:00,030 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:00,030 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/B is initiating minor compaction (all files) 2024-12-13T21:33:00,030 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/A is initiating minor compaction (all files) 2024-12-13T21:33:00,030 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/A in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:00,030 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/B in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
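A flush adds one new HFile per store, and the SortedCompactionPolicy/ExploringCompactionPolicy lines above show each store being selected for a minor compaction as soon as it has three eligible files ("3 eligible, 16 blocking"). Assuming stock defaults (this test may override them, which the log does not show), those two numbers correspond to the configuration keys below; the sketch only echoes them back to make the thresholds explicit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholdSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed defaults of 3 and 16 file(s).
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);       // files needed before a minor compaction is selected
    int blocking = conf.getInt("hbase.hstore.blockingStoreFiles", 16);  // store file count at which updates are blocked
    System.out.println("compact at " + minFiles + " files, block updates at " + blocking + " files");
  }
}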
2024-12-13T21:33:00,031 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/8106d07a6b9546f7b999170fbaa1503d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/3d12c6ea22d94e3489194d1749f00fa4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/bbd8030bd05343b58580af341e9bccb4] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=39.8 K 2024-12-13T21:33:00,031 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/1dfe24aa2ccb4c3ab4c410888355acd9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/d1f9fe7855bc4784b3a67fb7a250cfd8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/edfee14373394051a15ec25b13baf36f] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=35.3 K 2024-12-13T21:33:00,031 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8106d07a6b9546f7b999170fbaa1503d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125577792 2024-12-13T21:33:00,031 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 1dfe24aa2ccb4c3ab4c410888355acd9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125577792 2024-12-13T21:33:00,031 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d12c6ea22d94e3489194d1749f00fa4, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734125577926 2024-12-13T21:33:00,031 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting d1f9fe7855bc4784b3a67fb7a250cfd8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734125577926 2024-12-13T21:33:00,031 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting edfee14373394051a15ec25b13baf36f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734125578548 2024-12-13T21:33:00,032 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbd8030bd05343b58580af341e9bccb4, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734125578548 2024-12-13T21:33:00,036 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#B#compaction#395 average throughput is unlimited, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:00,037 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/1ff3eff564904d48be7bfa7e979c5ca9 is 50, key is test_row_0/B:col10/1734125579171/Put/seqid=0 2024-12-13T21:33:00,039 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#A#compaction#396 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:00,039 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/ae41547d1468433ebc4c2ad32008602a is 50, key is test_row_0/A:col10/1734125579171/Put/seqid=0 2024-12-13T21:33:00,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742303_1479 (size=12207) 2024-12-13T21:33:00,044 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/1ff3eff564904d48be7bfa7e979c5ca9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/1ff3eff564904d48be7bfa7e979c5ca9 2024-12-13T21:33:00,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742304_1480 (size=12207) 2024-12-13T21:33:00,053 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/B of 836c7ebcedb8aba974e5bf30d5802cfc into 1ff3eff564904d48be7bfa7e979c5ca9(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
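The PressureAwareThroughputController lines above report the write-rate cap applied to each compaction; in this run the total limit is 50.00 MB/second, and the controller scales the cap between a lower and an upper bound as flush/compaction pressure changes. The keys in the sketch below are the usual knobs for that controller; the values are placeholders, not the ones this test uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder bounds in bytes/second; the effective limit in the log above is 50.00 MB/second.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
  }
}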
2024-12-13T21:33:00,053 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:00,053 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/B, priority=13, startTime=1734125580029; duration=0sec 2024-12-13T21:33:00,053 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:00,053 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:B 2024-12-13T21:33:00,053 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:00,055 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:00,055 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/C is initiating minor compaction (all files) 2024-12-13T21:33:00,055 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/C in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:00,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-13T21:33:00,055 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/349b2ed789e04ff58269e1d60eb5199f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/436c499911164dc981f11efc2798f594, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/629f40bd07f64df3b58f5199325e1cda] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=35.3 K 2024-12-13T21:33:00,056 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 349b2ed789e04ff58269e1d60eb5199f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734125577792 2024-12-13T21:33:00,056 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 436c499911164dc981f11efc2798f594, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734125577926 2024-12-13T21:33:00,056 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 629f40bd07f64df3b58f5199325e1cda, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, 
seqNum=91, earliestPutTs=1734125578548 2024-12-13T21:33:00,057 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-13T21:33:00,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:00,057 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-13T21:33:00,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:00,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:00,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:00,058 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/ae41547d1468433ebc4c2ad32008602a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/ae41547d1468433ebc4c2ad32008602a 2024-12-13T21:33:00,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:00,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:00,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:00,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/168e9671908643d980d143bb9f868303 is 50, key is test_row_0/A:col10/1734125579205/Put/seqid=0 2024-12-13T21:33:00,067 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/A of 836c7ebcedb8aba974e5bf30d5802cfc into ae41547d1468433ebc4c2ad32008602a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
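From the client's point of view, the rejected Mutate calls in this section are ordinary Puts against row test_row_0 in column families A, B and C (qualifier col10), and RegionTooBusyException is treated as retryable: the standard HBase client backs off and retries, governed by hbase.client.retries.number and hbase.client.pause. A minimal sketch of such a write, with the value and the retry settings chosen arbitrarily:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetriedPutSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 15); // arbitrary illustration values
    conf.setLong("hbase.client.pause", 100);        // base pause in ms between retries
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // If the region stays over its memstore limit past all retries, this
      // surfaces as an IOException wrapping RegionTooBusyException.
      table.put(put);
    }
  }
}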
2024-12-13T21:33:00,067 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:00,067 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/A, priority=13, startTime=1734125580029; duration=0sec 2024-12-13T21:33:00,067 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:00,067 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:A 2024-12-13T21:33:00,077 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#C#compaction#398 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:00,077 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/81fb736c78ae457da7ec8aa921618ac0 is 50, key is test_row_0/C:col10/1734125579171/Put/seqid=0 2024-12-13T21:33:00,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742305_1481 (size=12001) 2024-12-13T21:33:00,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742306_1482 (size=12207) 2024-12-13T21:33:00,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:00,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:00,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125640334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125640335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125640337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125640336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125640338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-13T21:33:00,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125640439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125640439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125640439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125640439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:33:00,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125640440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878
2024-12-13T21:33:00,482 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/168e9671908643d980d143bb9f868303
2024-12-13T21:33:00,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/71e93129c05a426893dd4f7a381e0002 is 50, key is test_row_0/B:col10/1734125579205/Put/seqid=0
2024-12-13T21:33:00,489 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/81fb736c78ae457da7ec8aa921618ac0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/81fb736c78ae457da7ec8aa921618ac0
2024-12-13T21:33:00,493 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/C of 836c7ebcedb8aba974e5bf30d5802cfc into 81fb736c78ae457da7ec8aa921618ac0(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
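The repeated RegionTooBusyException entries above are the region server refusing writes while the region's memstore is over its blocking limit (reported here as 512.0 K); each rejected Mutate appears once as a WARN with the stack trace from HRegion.checkResources and once as a DEBUG line from ipc.CallRunner. As a rough, self-contained illustration of that blocking behaviour (not the actual HBase source; the flush size and block multiplier below are assumed test-sized values chosen so that 128 KB x 4 reproduces the 512 K limit seen in this log):

import java.io.IOException;

public class MemstoreBlockingSketch {

    // Hypothetical stand-in for org.apache.hadoop.hbase.RegionTooBusyException,
    // used only so this sketch compiles without HBase on the classpath.
    static class RegionTooBusyException extends IOException {
        RegionTooBusyException(String msg) { super(msg); }
    }

    // Assumed test-sized settings: 128 KB flush size x block multiplier 4
    // = 512 KB blocking limit, matching "Over memstore limit=512.0 K" above.
    static final long MEMSTORE_FLUSH_SIZE = 128L * 1024;
    static final long BLOCK_MULTIPLIER = 4;
    static final long BLOCKING_LIMIT = MEMSTORE_FLUSH_SIZE * BLOCK_MULTIPLIER;

    static long memstoreDataSize;

    // Simplified shape of the server-side check: once the region's memstore
    // passes the blocking limit, a flush is requested and the put is rejected.
    static void checkResources(String regionName) throws RegionTooBusyException {
        if (memstoreDataSize > BLOCKING_LIMIT) {
            requestFlush(regionName);
            throw new RegionTooBusyException("Over memstore limit="
                + (BLOCKING_LIMIT / 1024) + ".0 K, regionName=" + regionName);
        }
    }

    static void requestFlush(String regionName) {
        System.out.println("Flush requested on " + regionName);
    }

    public static void main(String[] args) {
        String region = "836c7ebcedb8aba974e5bf30d5802cfc";
        memstoreDataSize = 600L * 1024; // writers got ahead of the flusher
        try {
            checkResources(region);
        } catch (RegionTooBusyException e) {
            System.out.println("Rejected put: " + e.getMessage());
        }
    }
}

Once the flush and compaction recorded around this point shrink the memstore back under the limit, the same handlers accept writes again, which is why the WARN bursts come and go in step with the flush activity below.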
2024-12-13T21:33:00,493 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc:
2024-12-13T21:33:00,493 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/C, priority=13, startTime=1734125580029; duration=0sec
2024-12-13T21:33:00,493 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T21:33:00,493 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:C
2024-12-13T21:33:00,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742307_1483 (size=12001)
2024-12-13T21:33:00,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:33:00,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125640643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878
2024-12-13T21:33:00,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125640643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125640643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125640643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:33:00,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125640643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878
2024-12-13T21:33:00,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136
2024-12-13T21:33:00,893 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/71e93129c05a426893dd4f7a381e0002
2024-12-13T21:33:00,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/433fd56d946945ba9cf6866effab08b9 is 50, key is test_row_0/C:col10/1734125579205/Put/seqid=0
2024-12-13T21:33:00,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742308_1484 (size=12001)
2024-12-13T21:33:00,902 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/433fd56d946945ba9cf6866effab08b9
2024-12-13T21:33:00,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/168e9671908643d980d143bb9f868303 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/168e9671908643d980d143bb9f868303
2024-12-13T21:33:00,907 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/168e9671908643d980d143bb9f868303, entries=150, sequenceid=115, filesize=11.7 K
2024-12-13T21:33:00,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/71e93129c05a426893dd4f7a381e0002 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/71e93129c05a426893dd4f7a381e0002
2024-12-13T21:33:00,910 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/71e93129c05a426893dd4f7a381e0002, entries=150, sequenceid=115, filesize=11.7 K
2024-12-13T21:33:00,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/433fd56d946945ba9cf6866effab08b9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/433fd56d946945ba9cf6866effab08b9
2024-12-13T21:33:00,912 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/433fd56d946945ba9cf6866effab08b9, entries=150, sequenceid=115, filesize=11.7 K
2024-12-13T21:33:00,913 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 836c7ebcedb8aba974e5bf30d5802cfc in 856ms, sequenceid=115, compaction requested=false
2024-12-13T21:33:00,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc:
2024-12-13T21:33:00,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.
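For context on the client side of these rejections: RegionTooBusyException is normally treated as retryable, so the writers in this test keep re-submitting their Mutate calls and eventually succeed once flushes like the one completed above free memstore space. A minimal sketch of such a writer against the same table is shown below, assuming the standard HBase Java client on the classpath; the retry and pause settings are illustrative knobs, not the values actually used by TestAcidGuarantees:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyWriterSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative client knobs: how often retryable exceptions such as
        // RegionTooBusyException are retried, and the base pause (ms) between
        // attempts. The actual test configuration is not shown in this log.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Same row/column shape as the test: row test_row_0, families A/B/C.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            // If the region is still over its blocking limit after all retries,
            // this typically surfaces as a retries-exhausted IOException that
            // wraps the RegionTooBusyException seen in the server log.
            table.put(put);
        }
    }
}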
2024-12-13T21:33:00,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137
2024-12-13T21:33:00,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=137
2024-12-13T21:33:00,920 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136
2024-12-13T21:33:00,920 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1640 sec
2024-12-13T21:33:00,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.1670 sec
2024-12-13T21:33:00,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc
2024-12-13T21:33:00,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB
2024-12-13T21:33:00,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A
2024-12-13T21:33:00,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:33:00,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B
2024-12-13T21:33:00,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:33:00,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C
2024-12-13T21:33:00,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:33:00,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/d532fe3e60df4b6b99766420c2aa437b is 50, key is test_row_0/A:col10/1734125580336/Put/seqid=0
2024-12-13T21:33:00,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742309_1485 (size=16781)
2024-12-13T21:33:00,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125640965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125640969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125640970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125640970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:00,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:00,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125640970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125641071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125641073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125641075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125641075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125641075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125641276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125641279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125641279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125641280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T21:33:01,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125641280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878
2024-12-13T21:33:01,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/d532fe3e60df4b6b99766420c2aa437b
2024-12-13T21:33:01,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/cb5bf7cc3ccb48d7a7f6181e1bf4b645 is 50, key is test_row_0/B:col10/1734125580336/Put/seqid=0
2024-12-13T21:33:01,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742310_1486 (size=12101)
2024-12-13T21:33:01,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125641581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125641581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125641581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125641585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:01,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125641585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:01,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/cb5bf7cc3ccb48d7a7f6181e1bf4b645 2024-12-13T21:33:01,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/03122ed8ca934332a713e34db571a557 is 50, key is test_row_0/C:col10/1734125580336/Put/seqid=0 2024-12-13T21:33:01,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742311_1487 (size=12101) 2024-12-13T21:33:01,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/03122ed8ca934332a713e34db571a557 2024-12-13T21:33:01,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/d532fe3e60df4b6b99766420c2aa437b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/d532fe3e60df4b6b99766420c2aa437b 2024-12-13T21:33:01,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/d532fe3e60df4b6b99766420c2aa437b, entries=250, sequenceid=132, filesize=16.4 K 2024-12-13T21:33:01,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/cb5bf7cc3ccb48d7a7f6181e1bf4b645 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/cb5bf7cc3ccb48d7a7f6181e1bf4b645 2024-12-13T21:33:01,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/cb5bf7cc3ccb48d7a7f6181e1bf4b645, entries=150, sequenceid=132, filesize=11.8 K 2024-12-13T21:33:01,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/03122ed8ca934332a713e34db571a557 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/03122ed8ca934332a713e34db571a557 2024-12-13T21:33:01,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/03122ed8ca934332a713e34db571a557, entries=150, sequenceid=132, filesize=11.8 K 2024-12-13T21:33:01,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 836c7ebcedb8aba974e5bf30d5802cfc in 845ms, sequenceid=132, compaction requested=true 2024-12-13T21:33:01,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:01,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:01,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:01,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:01,794 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:01,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:01,794 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:01,794 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:01,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:01,795 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:01,795 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40989 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:01,795 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/A is initiating minor compaction (all files) 2024-12-13T21:33:01,795 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/B is initiating minor compaction (all files) 2024-12-13T21:33:01,795 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/B in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:01,795 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/A in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:01,795 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/ae41547d1468433ebc4c2ad32008602a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/168e9671908643d980d143bb9f868303, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/d532fe3e60df4b6b99766420c2aa437b] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=40.0 K 2024-12-13T21:33:01,795 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/1ff3eff564904d48be7bfa7e979c5ca9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/71e93129c05a426893dd4f7a381e0002, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/cb5bf7cc3ccb48d7a7f6181e1bf4b645] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=35.5 K 2024-12-13T21:33:01,795 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 1ff3eff564904d48be7bfa7e979c5ca9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734125578548 2024-12-13T21:33:01,795 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae41547d1468433ebc4c2ad32008602a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734125578548 2024-12-13T21:33:01,795 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 168e9671908643d980d143bb9f868303, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1734125579202 2024-12-13T21:33:01,795 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 71e93129c05a426893dd4f7a381e0002, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1734125579202 2024-12-13T21:33:01,796 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting d532fe3e60df4b6b99766420c2aa437b, keycount=250, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734125580333 2024-12-13T21:33:01,796 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting cb5bf7cc3ccb48d7a7f6181e1bf4b645, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734125580336 2024-12-13T21:33:01,801 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#A#compaction#404 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:01,802 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/6944debc839c40b6bc2f42210aae0e6c is 50, key is test_row_0/A:col10/1734125580336/Put/seqid=0 2024-12-13T21:33:01,804 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#B#compaction#405 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:01,804 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/ced203f34f604af1b975261e0bd5efde is 50, key is test_row_0/B:col10/1734125580336/Put/seqid=0 2024-12-13T21:33:01,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742312_1488 (size=12409) 2024-12-13T21:33:01,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742313_1489 (size=12409) 2024-12-13T21:33:01,810 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/6944debc839c40b6bc2f42210aae0e6c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/6944debc839c40b6bc2f42210aae0e6c 2024-12-13T21:33:01,813 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/A of 836c7ebcedb8aba974e5bf30d5802cfc into 6944debc839c40b6bc2f42210aae0e6c(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:01,813 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:01,813 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/A, priority=13, startTime=1734125581794; duration=0sec 2024-12-13T21:33:01,813 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:01,813 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:A 2024-12-13T21:33:01,813 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:01,814 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:01,814 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/C is initiating minor compaction (all files) 2024-12-13T21:33:01,814 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/C in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:01,814 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/81fb736c78ae457da7ec8aa921618ac0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/433fd56d946945ba9cf6866effab08b9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/03122ed8ca934332a713e34db571a557] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=35.5 K 2024-12-13T21:33:01,814 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81fb736c78ae457da7ec8aa921618ac0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734125578548 2024-12-13T21:33:01,815 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 433fd56d946945ba9cf6866effab08b9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1734125579202 2024-12-13T21:33:01,815 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03122ed8ca934332a713e34db571a557, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734125580336 2024-12-13T21:33:01,821 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#C#compaction#406 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:01,822 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/5691e17688844bf1ab144e1b44277e6d is 50, key is test_row_0/C:col10/1734125580336/Put/seqid=0 2024-12-13T21:33:01,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742314_1490 (size=12409) 2024-12-13T21:33:01,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-13T21:33:01,859 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-13T21:33:01,860 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:01,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-13T21:33:01,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-13T21:33:01,861 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:01,862 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:01,862 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:01,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-13T21:33:02,013 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-13T21:33:02,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:02,014 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-13T21:33:02,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:02,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:02,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:02,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:02,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:02,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:02,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/22eecd283c7b4fdd99cea93d092b9cb6 is 50, key is test_row_0/A:col10/1734125580968/Put/seqid=0 2024-12-13T21:33:02,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742315_1491 (size=12151) 2024-12-13T21:33:02,024 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/22eecd283c7b4fdd99cea93d092b9cb6 2024-12-13T21:33:02,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/3ce93b47b2cb45b3910b206465b9df81 is 50, key is test_row_0/B:col10/1734125580968/Put/seqid=0 2024-12-13T21:33:02,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742316_1492 (size=12151) 2024-12-13T21:33:02,038 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/3ce93b47b2cb45b3910b206465b9df81 2024-12-13T21:33:02,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/d3e3536ce6fd43c3af3a6d9fe6f44eaf is 50, key is test_row_0/C:col10/1734125580968/Put/seqid=0 2024-12-13T21:33:02,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742317_1493 (size=12151) 2024-12-13T21:33:02,049 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/d3e3536ce6fd43c3af3a6d9fe6f44eaf 2024-12-13T21:33:02,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/22eecd283c7b4fdd99cea93d092b9cb6 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/22eecd283c7b4fdd99cea93d092b9cb6 2024-12-13T21:33:02,060 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/22eecd283c7b4fdd99cea93d092b9cb6, entries=150, sequenceid=155, filesize=11.9 K 2024-12-13T21:33:02,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/3ce93b47b2cb45b3910b206465b9df81 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/3ce93b47b2cb45b3910b206465b9df81 2024-12-13T21:33:02,064 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/3ce93b47b2cb45b3910b206465b9df81, entries=150, sequenceid=155, filesize=11.9 K 2024-12-13T21:33:02,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/d3e3536ce6fd43c3af3a6d9fe6f44eaf as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/d3e3536ce6fd43c3af3a6d9fe6f44eaf 2024-12-13T21:33:02,069 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/d3e3536ce6fd43c3af3a6d9fe6f44eaf, entries=150, sequenceid=155, filesize=11.9 K 2024-12-13T21:33:02,069 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for 836c7ebcedb8aba974e5bf30d5802cfc in 55ms, sequenceid=155, compaction requested=false 2024-12-13T21:33:02,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:02,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:02,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-13T21:33:02,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-13T21:33:02,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-13T21:33:02,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 208 msec 2024-12-13T21:33:02,076 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 214 msec 2024-12-13T21:33:02,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:02,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-13T21:33:02,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:02,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:02,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:02,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:02,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:02,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:02,102 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/2bcfbca5b2be471191de1bf2f230ec6a is 50, key is test_row_0/A:col10/1734125582089/Put/seqid=0 2024-12-13T21:33:02,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742318_1494 (size=19321) 2024-12-13T21:33:02,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/2bcfbca5b2be471191de1bf2f230ec6a 2024-12-13T21:33:02,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/16bbfa6ece6842a98b4ac52554e5e0e6 is 50, key is test_row_0/B:col10/1734125582089/Put/seqid=0 2024-12-13T21:33:02,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742319_1495 (size=12151) 2024-12-13T21:33:02,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125642129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125642133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125642133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125642140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125642140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-13T21:33:02,162 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-13T21:33:02,164 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:02,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-12-13T21:33:02,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-13T21:33:02,166 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:02,166 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:02,166 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:02,211 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/ced203f34f604af1b975261e0bd5efde as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/ced203f34f604af1b975261e0bd5efde 2024-12-13T21:33:02,217 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/B of 836c7ebcedb8aba974e5bf30d5802cfc into ced203f34f604af1b975261e0bd5efde(size=12.1 K), total size for store is 24.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:02,217 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:02,217 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/B, priority=13, startTime=1734125581794; duration=0sec 2024-12-13T21:33:02,217 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:02,217 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:B 2024-12-13T21:33:02,236 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/5691e17688844bf1ab144e1b44277e6d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/5691e17688844bf1ab144e1b44277e6d 2024-12-13T21:33:02,239 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/C of 836c7ebcedb8aba974e5bf30d5802cfc into 5691e17688844bf1ab144e1b44277e6d(size=12.1 K), total size for store is 24.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:02,239 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:02,239 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/C, priority=13, startTime=1734125581794; duration=0sec 2024-12-13T21:33:02,239 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:02,239 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:C 2024-12-13T21:33:02,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125642241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125642244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125642244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125642250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125642250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-13T21:33:02,319 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-13T21:33:02,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:02,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:02,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:02,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
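The RegionTooBusyException entries above show puts being rejected while the region's memstore is over its 512.0 K blocking limit; the HBase client normally retries these internally. Purely as a minimal, hand-rolled sketch (not the test's own code), the snippet below catches the exception and backs off before retrying a put against the TestAcidGuarantees table seen in the log; the retry count and backoff values are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row, family, and qualifier names are taken from the log records above.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100; // assumed starting backoff, doubled on each rejection
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    // May be rejected with RegionTooBusyException while the memstore
                    // is over its blocking limit, as the WARN records above show.
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    // The region is blocking writes until a flush catches up; back off and retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}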
2024-12-13T21:33:02,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:02,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:02,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125642443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125642450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125642450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125642456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125642457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-13T21:33:02,471 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-13T21:33:02,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:02,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:02,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:02,472 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:02,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:02,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:02,525 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/16bbfa6ece6842a98b4ac52554e5e0e6 2024-12-13T21:33:02,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/405c833c90a44d818b297f8190da7ed4 is 50, key is test_row_0/C:col10/1734125582089/Put/seqid=0 2024-12-13T21:33:02,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742320_1496 (size=12151) 2024-12-13T21:33:02,535 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/405c833c90a44d818b297f8190da7ed4 2024-12-13T21:33:02,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/2bcfbca5b2be471191de1bf2f230ec6a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2bcfbca5b2be471191de1bf2f230ec6a 2024-12-13T21:33:02,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2bcfbca5b2be471191de1bf2f230ec6a, entries=300, sequenceid=166, filesize=18.9 K 2024-12-13T21:33:02,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/16bbfa6ece6842a98b4ac52554e5e0e6 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/16bbfa6ece6842a98b4ac52554e5e0e6 2024-12-13T21:33:02,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/16bbfa6ece6842a98b4ac52554e5e0e6, entries=150, sequenceid=166, filesize=11.9 K 2024-12-13T21:33:02,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/405c833c90a44d818b297f8190da7ed4 as 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/405c833c90a44d818b297f8190da7ed4 2024-12-13T21:33:02,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/405c833c90a44d818b297f8190da7ed4, entries=150, sequenceid=166, filesize=11.9 K 2024-12-13T21:33:02,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 836c7ebcedb8aba974e5bf30d5802cfc in 457ms, sequenceid=166, compaction requested=true 2024-12-13T21:33:02,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:02,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:02,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:02,553 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:02,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:02,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:02,553 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:02,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:02,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:02,560 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43881 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:02,560 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/A is initiating minor compaction (all files) 2024-12-13T21:33:02,560 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/A in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
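The compaction records above show ExploringCompactionPolicy selecting all 3 eligible store files for a minor compaction, with 16 files as the blocking threshold. As a rough sketch only, the snippet below sets the standard configuration keys behind those numbers; the values shown are the usual HBase defaults for illustration and were not read from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum/maximum number of store files considered for one minor compaction
        // (the log above shows 3 eligible files being selected in a single pass).
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Flushes are delayed and writes eventually block once a store accumulates
        // this many files (matches the "16 blocking" figure reported above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}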
2024-12-13T21:33:02,560 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/6944debc839c40b6bc2f42210aae0e6c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/22eecd283c7b4fdd99cea93d092b9cb6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2bcfbca5b2be471191de1bf2f230ec6a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=42.9 K 2024-12-13T21:33:02,560 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:02,560 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/B is initiating minor compaction (all files) 2024-12-13T21:33:02,560 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/B in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:02,560 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/ced203f34f604af1b975261e0bd5efde, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/3ce93b47b2cb45b3910b206465b9df81, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/16bbfa6ece6842a98b4ac52554e5e0e6] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=35.9 K 2024-12-13T21:33:02,561 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6944debc839c40b6bc2f42210aae0e6c, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734125580336 2024-12-13T21:33:02,561 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting ced203f34f604af1b975261e0bd5efde, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734125580336 2024-12-13T21:33:02,561 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22eecd283c7b4fdd99cea93d092b9cb6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734125580963 2024-12-13T21:33:02,561 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ce93b47b2cb45b3910b206465b9df81, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734125580963 2024-12-13T21:33:02,561 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 16bbfa6ece6842a98b4ac52554e5e0e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734125582089 2024-12-13T21:33:02,561 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2bcfbca5b2be471191de1bf2f230ec6a, keycount=300, bloomtype=ROW, size=18.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734125582089 2024-12-13T21:33:02,572 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#B#compaction#413 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:02,573 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/dafa2d814a9e4987ab2a80e692cc7fe4 is 50, key is test_row_0/B:col10/1734125582089/Put/seqid=0 2024-12-13T21:33:02,585 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#A#compaction#414 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:02,586 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/2ff55c735c514ed197dba8ca50c15d05 is 50, key is test_row_0/A:col10/1734125582089/Put/seqid=0 2024-12-13T21:33:02,623 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-13T21:33:02,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:02,624 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-13T21:33:02,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:02,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:02,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:02,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:02,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:02,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:02,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742321_1497 (size=12561) 2024-12-13T21:33:02,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742322_1498 (size=12561) 2024-12-13T21:33:02,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/3e9b1cd2f7ce4661b6be7b507e1b3a2c is 50, key is test_row_0/A:col10/1734125582128/Put/seqid=0 2024-12-13T21:33:02,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742323_1499 (size=12151) 2024-12-13T21:33:02,645 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/3e9b1cd2f7ce4661b6be7b507e1b3a2c 2024-12-13T21:33:02,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/8e0181c5856348cdac7fbf8dce971c4a is 50, key is test_row_0/B:col10/1734125582128/Put/seqid=0 2024-12-13T21:33:02,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742324_1500 (size=12151) 2024-12-13T21:33:02,750 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:02,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:02,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125642759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125642760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125642760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125642760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-13T21:33:02,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125642761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125642865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125642866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:02,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:02,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125642869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:03,036 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/dafa2d814a9e4987ab2a80e692cc7fe4 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/dafa2d814a9e4987ab2a80e692cc7fe4 2024-12-13T21:33:03,038 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/2ff55c735c514ed197dba8ca50c15d05 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2ff55c735c514ed197dba8ca50c15d05 2024-12-13T21:33:03,040 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/B of 836c7ebcedb8aba974e5bf30d5802cfc into dafa2d814a9e4987ab2a80e692cc7fe4(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
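The repeated WARN/DEBUG pairs above are the region server refusing Mutate calls while the memstore of region 836c7ebcedb8aba974e5bf30d5802cfc sits above its blocking limit ("Over memstore limit=512.0 K"); the writes resume once the in-flight flush and compactions drain the memstore. A minimal client-side sketch of retrying such a rejected put is shown below, assuming the standard HBase Java client. The table, family, and qualifier names are taken from this test, but the value, class name, and retry policy are illustrative, and depending on client retry settings the exception may surface wrapped (for example inside a RetriesExhaustedWithDetailsException) rather than directly as RegionTooBusyException.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          // Family "A" and qualifier "col10" mirror the stores/columns seen in this log; the value is made up.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break; // accepted once the memstore drops below the blocking limit
            } catch (RegionTooBusyException busy) {
              if (attempt >= 5) throw busy;   // give up after a few tries
              Thread.sleep(100L * attempt);   // simple linear backoff
            }
          }
        }
      }
    }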
2024-12-13T21:33:03,040 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:03,040 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/B, priority=13, startTime=1734125582553; duration=0sec 2024-12-13T21:33:03,040 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:03,040 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:B 2024-12-13T21:33:03,040 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:03,041 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:03,041 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/C is initiating minor compaction (all files) 2024-12-13T21:33:03,041 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/C in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:03,041 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/5691e17688844bf1ab144e1b44277e6d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/d3e3536ce6fd43c3af3a6d9fe6f44eaf, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/405c833c90a44d818b297f8190da7ed4] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=35.9 K 2024-12-13T21:33:03,042 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 5691e17688844bf1ab144e1b44277e6d, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734125580336 2024-12-13T21:33:03,042 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting d3e3536ce6fd43c3af3a6d9fe6f44eaf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734125580963 2024-12-13T21:33:03,042 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 405c833c90a44d818b297f8190da7ed4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734125582089 2024-12-13T21:33:03,043 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/A of 836c7ebcedb8aba974e5bf30d5802cfc into 2ff55c735c514ed197dba8ca50c15d05(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:03,043 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:03,043 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/A, priority=13, startTime=1734125582553; duration=0sec 2024-12-13T21:33:03,043 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:03,043 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:A 2024-12-13T21:33:03,048 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#C#compaction#417 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:03,048 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/af220dc36f9c4f7faef30d1dc907aac0 is 50, key is test_row_0/C:col10/1734125582089/Put/seqid=0 2024-12-13T21:33:03,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742325_1501 (size=12561) 2024-12-13T21:33:03,055 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/af220dc36f9c4f7faef30d1dc907aac0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/af220dc36f9c4f7faef30d1dc907aac0 2024-12-13T21:33:03,057 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/8e0181c5856348cdac7fbf8dce971c4a 2024-12-13T21:33:03,060 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/C of 836c7ebcedb8aba974e5bf30d5802cfc into af220dc36f9c4f7faef30d1dc907aac0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
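The selection and throttling figures above come from a handful of region server settings: "3 eligible, 16 blocking" reflects the minimum file count for a minor compaction and the blocking store-file count, the "Over memstore limit=512.0 K" rejections reflect the memstore flush size multiplied by the block multiplier, and "total limit is 50.00 MB/second" comes from the pressure-aware compaction throughput controller. A hedged sketch of the corresponding configuration keys follows; the concrete numbers are illustrative back-calculations (for example 128 KB x 4 = 512 KB), since the exact values this test uses are not visible in this excerpt.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Writes block once memstore size reaches flush size * block multiplier.
        // 128 KB * 4 = 512 KB would match the "Over memstore limit=512.0 K" rejections above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // A minor compaction needs at least this many eligible files ("3 eligible" above) ...
        conf.setInt("hbase.hstore.compaction.min", 3);
        // ... and updates are delayed once a store accumulates this many files ("16 blocking").
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("blocking memstore bytes = "
            + conf.getLong("hbase.hregion.memstore.flush.size", 0)
              * conf.getInt("hbase.hregion.memstore.block.multiplier", 4));
      }
    }

In a real deployment these properties would normally be set in hbase-site.xml rather than programmatically; the Java form is used here only to keep the sketch self-contained.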
2024-12-13T21:33:03,060 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:03,060 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/C, priority=13, startTime=1734125582553; duration=0sec 2024-12-13T21:33:03,060 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:03,060 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:C 2024-12-13T21:33:03,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/ea3da4b519e7416da3a241997b56a7a7 is 50, key is test_row_0/C:col10/1734125582128/Put/seqid=0 2024-12-13T21:33:03,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:03,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125643072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:03,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:03,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125643072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:03,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:03,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125643072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:03,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742326_1502 (size=12151) 2024-12-13T21:33:03,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-13T21:33:03,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:03,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125643267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:03,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:03,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125643269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:03,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:03,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125643377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:03,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:03,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125643377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:03,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:03,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125643379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:03,481 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/ea3da4b519e7416da3a241997b56a7a7 2024-12-13T21:33:03,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/3e9b1cd2f7ce4661b6be7b507e1b3a2c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/3e9b1cd2f7ce4661b6be7b507e1b3a2c 2024-12-13T21:33:03,487 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/3e9b1cd2f7ce4661b6be7b507e1b3a2c, entries=150, sequenceid=194, filesize=11.9 K 2024-12-13T21:33:03,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/8e0181c5856348cdac7fbf8dce971c4a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/8e0181c5856348cdac7fbf8dce971c4a 2024-12-13T21:33:03,490 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/8e0181c5856348cdac7fbf8dce971c4a, entries=150, sequenceid=194, filesize=11.9 K 2024-12-13T21:33:03,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/ea3da4b519e7416da3a241997b56a7a7 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/ea3da4b519e7416da3a241997b56a7a7 2024-12-13T21:33:03,493 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/ea3da4b519e7416da3a241997b56a7a7, entries=150, sequenceid=194, filesize=11.9 K 2024-12-13T21:33:03,494 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 836c7ebcedb8aba974e5bf30d5802cfc in 870ms, sequenceid=194, compaction requested=false 2024-12-13T21:33:03,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:03,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
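The flush that finishes above (pid=141, "Finished flush of dataSize ~154.31 KB ... in 870ms, sequenceid=194") was requested by the test through the master's FlushTableProcedure rather than triggered by memstore pressure, and the lines that follow show that procedure being marked SUCCESS. A minimal sketch of issuing such a table flush from a client, assuming the standard HBase Admin API, could look like the following; the table name is the one used in this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; in this build that request
          // shows up as the FlushTableProcedure / FlushRegionProcedure pair seen in this log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }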
2024-12-13T21:33:03,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-13T21:33:03,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-12-13T21:33:03,496 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-13T21:33:03,496 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3290 sec 2024-12-13T21:33:03,497 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.3330 sec 2024-12-13T21:33:03,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:03,885 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:33:03,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:03,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:03,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:03,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:03,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:03,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:03,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/4f1919cc61a0455b940323a302488f3d is 50, key is test_row_1/A:col10/1734125583884/Put/seqid=0 2024-12-13T21:33:03,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742327_1503 (size=9757) 2024-12-13T21:33:03,898 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/4f1919cc61a0455b940323a302488f3d 2024-12-13T21:33:03,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/81e7ac8717f04725b2adebb105b5706f is 50, key is test_row_1/B:col10/1734125583884/Put/seqid=0 2024-12-13T21:33:03,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to 
blk_1073742328_1504 (size=9757) 2024-12-13T21:33:03,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:03,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125643967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:03,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:03,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125643968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:03,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:03,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125643968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:04,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125644075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:04,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125644075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:04,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125644075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-13T21:33:04,269 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-13T21:33:04,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:04,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-12-13T21:33:04,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-13T21:33:04,271 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:04,271 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:04,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:04,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:04,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125644277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:04,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125644278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:04,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125644278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:04,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125644279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:04,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125644279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/81e7ac8717f04725b2adebb105b5706f 2024-12-13T21:33:04,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/eb88507695ca40a1962341857388556a is 50, key is test_row_1/C:col10/1734125583884/Put/seqid=0 2024-12-13T21:33:04,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742329_1505 (size=9757) 2024-12-13T21:33:04,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-13T21:33:04,422 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-13T21:33:04,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:04,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:04,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:04,423 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143
java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T21:33:04,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143
java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T21:33:04,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=143
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:04,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-13T21:33:04,575 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-13T21:33:04,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:04,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:04,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:04,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:04,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:04,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:04,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:04,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125644582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:04,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125644583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:04,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125644584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,727 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:04,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-13T21:33:04,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:04,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:04,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:04,728 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:04,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:04,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:04,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/eb88507695ca40a1962341857388556a 2024-12-13T21:33:04,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/4f1919cc61a0455b940323a302488f3d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/4f1919cc61a0455b940323a302488f3d 2024-12-13T21:33:04,756 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/4f1919cc61a0455b940323a302488f3d, entries=100, sequenceid=209, filesize=9.5 K 2024-12-13T21:33:04,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/81e7ac8717f04725b2adebb105b5706f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/81e7ac8717f04725b2adebb105b5706f 2024-12-13T21:33:04,760 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/81e7ac8717f04725b2adebb105b5706f, entries=100, sequenceid=209, filesize=9.5 K 2024-12-13T21:33:04,761 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/eb88507695ca40a1962341857388556a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/eb88507695ca40a1962341857388556a 2024-12-13T21:33:04,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/eb88507695ca40a1962341857388556a, entries=100, sequenceid=209, filesize=9.5 K 2024-12-13T21:33:04,764 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush 
of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 836c7ebcedb8aba974e5bf30d5802cfc in 879ms, sequenceid=209, compaction requested=true
2024-12-13T21:33:04,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc:
2024-12-13T21:33:04,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:A, priority=-2147483648, current under compaction store size is 1
2024-12-13T21:33:04,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T21:33:04,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:B, priority=-2147483648, current under compaction store size is 2
2024-12-13T21:33:04,765 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T21:33:04,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-13T21:33:04,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:C, priority=-2147483648, current under compaction store size is 3
2024-12-13T21:33:04,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0
2024-12-13T21:33:04,765 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T21:33:04,766 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-13T21:33:04,766 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/A is initiating minor compaction (all files)
2024-12-13T21:33:04,766 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/A in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.
2024-12-13T21:33:04,766 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2ff55c735c514ed197dba8ca50c15d05, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/3e9b1cd2f7ce4661b6be7b507e1b3a2c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/4f1919cc61a0455b940323a302488f3d] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=33.7 K 2024-12-13T21:33:04,767 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:04,767 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ff55c735c514ed197dba8ca50c15d05, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734125582089 2024-12-13T21:33:04,767 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/B is initiating minor compaction (all files) 2024-12-13T21:33:04,767 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/B in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:04,767 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/dafa2d814a9e4987ab2a80e692cc7fe4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/8e0181c5856348cdac7fbf8dce971c4a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/81e7ac8717f04725b2adebb105b5706f] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=33.7 K 2024-12-13T21:33:04,768 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e9b1cd2f7ce4661b6be7b507e1b3a2c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734125582128 2024-12-13T21:33:04,768 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting dafa2d814a9e4987ab2a80e692cc7fe4, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734125582089 2024-12-13T21:33:04,768 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f1919cc61a0455b940323a302488f3d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1734125583884 2024-12-13T21:33:04,768 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e0181c5856348cdac7fbf8dce971c4a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734125582128 2024-12-13T21:33:04,768 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 81e7ac8717f04725b2adebb105b5706f, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1734125583884 2024-12-13T21:33:04,775 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#A#compaction#422 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:04,776 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/efbfe7629a834596922f0fcf160127d0 is 50, key is test_row_0/A:col10/1734125582128/Put/seqid=0 2024-12-13T21:33:04,781 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#B#compaction#423 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:04,782 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/715b3fc2f0c14c2690268ecfefd420e3 is 50, key is test_row_0/B:col10/1734125582128/Put/seqid=0 2024-12-13T21:33:04,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742330_1506 (size=12663) 2024-12-13T21:33:04,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742331_1507 (size=12663) 2024-12-13T21:33:04,796 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/715b3fc2f0c14c2690268ecfefd420e3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/715b3fc2f0c14c2690268ecfefd420e3 2024-12-13T21:33:04,800 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/B of 836c7ebcedb8aba974e5bf30d5802cfc into 715b3fc2f0c14c2690268ecfefd420e3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:04,800 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:04,800 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/B, priority=13, startTime=1734125584765; duration=0sec 2024-12-13T21:33:04,800 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:04,800 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:B 2024-12-13T21:33:04,800 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:04,801 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:04,801 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/C is initiating minor compaction (all files) 2024-12-13T21:33:04,801 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/C in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:04,801 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/af220dc36f9c4f7faef30d1dc907aac0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/ea3da4b519e7416da3a241997b56a7a7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/eb88507695ca40a1962341857388556a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=33.7 K 2024-12-13T21:33:04,802 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting af220dc36f9c4f7faef30d1dc907aac0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1734125582089 2024-12-13T21:33:04,803 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting ea3da4b519e7416da3a241997b56a7a7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734125582128 2024-12-13T21:33:04,803 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting eb88507695ca40a1962341857388556a, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1734125583884 2024-12-13T21:33:04,810 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#C#compaction#424 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:04,810 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/4ee1daa1d9e748bca7a0ba385a1d100f is 50, key is test_row_0/C:col10/1734125582128/Put/seqid=0 2024-12-13T21:33:04,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742332_1508 (size=12663) 2024-12-13T21:33:04,828 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/4ee1daa1d9e748bca7a0ba385a1d100f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/4ee1daa1d9e748bca7a0ba385a1d100f 2024-12-13T21:33:04,832 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/C of 836c7ebcedb8aba974e5bf30d5802cfc into 4ee1daa1d9e748bca7a0ba385a1d100f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:04,832 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc:
2024-12-13T21:33:04,832 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/C, priority=13, startTime=1734125584765; duration=0sec
2024-12-13T21:33:04,832 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T21:33:04,832 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:C
2024-12-13T21:33:04,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142
2024-12-13T21:33:04,880 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878
2024-12-13T21:33:04,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143
2024-12-13T21:33:04,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.
2024-12-13T21:33:04,892 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-12-13T21:33:04,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A
2024-12-13T21:33:04,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:33:04,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B
2024-12-13T21:33:04,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:33:04,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C
2024-12-13T21:33:04,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-13T21:33:04,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/c48c7f0a886845d7864d0098a786b5e9 is 50, key is test_row_0/A:col10/1734125583937/Put/seqid=0 2024-12-13T21:33:04,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742333_1509 (size=12151) 2024-12-13T21:33:05,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:05,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:05,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:05,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125645097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:05,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:05,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125645098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:05,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:05,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125645102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:05,200 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/efbfe7629a834596922f0fcf160127d0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/efbfe7629a834596922f0fcf160127d0 2024-12-13T21:33:05,204 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/A of 836c7ebcedb8aba974e5bf30d5802cfc into efbfe7629a834596922f0fcf160127d0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:05,204 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:05,204 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/A, priority=13, startTime=1734125584765; duration=0sec 2024-12-13T21:33:05,204 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:05,204 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:A 2024-12-13T21:33:05,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:05,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125645203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:05,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:05,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125645206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:05,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:05,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125645211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:05,310 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/c48c7f0a886845d7864d0098a786b5e9 2024-12-13T21:33:05,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/e409525a3022478e9807c8b5f2052035 is 50, key is test_row_0/B:col10/1734125583937/Put/seqid=0 2024-12-13T21:33:05,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742334_1510 (size=12151) 2024-12-13T21:33:05,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-13T21:33:05,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:05,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125645408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:05,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:05,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125645408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:05,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:05,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125645415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:05,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:05,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125645711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:05,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:05,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125645714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:05,720 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/e409525a3022478e9807c8b5f2052035 2024-12-13T21:33:05,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:05,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125645720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:05,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/265d6548271b4409ae0f8dd90ee003cd is 50, key is test_row_0/C:col10/1734125583937/Put/seqid=0 2024-12-13T21:33:05,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742335_1511 (size=12151) 2024-12-13T21:33:06,129 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/265d6548271b4409ae0f8dd90ee003cd 2024-12-13T21:33:06,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/c48c7f0a886845d7864d0098a786b5e9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/c48c7f0a886845d7864d0098a786b5e9 2024-12-13T21:33:06,135 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/c48c7f0a886845d7864d0098a786b5e9, entries=150, sequenceid=236, filesize=11.9 K 2024-12-13T21:33:06,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/e409525a3022478e9807c8b5f2052035 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/e409525a3022478e9807c8b5f2052035 2024-12-13T21:33:06,139 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 
{event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/e409525a3022478e9807c8b5f2052035, entries=150, sequenceid=236, filesize=11.9 K 2024-12-13T21:33:06,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/265d6548271b4409ae0f8dd90ee003cd as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/265d6548271b4409ae0f8dd90ee003cd 2024-12-13T21:33:06,142 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/265d6548271b4409ae0f8dd90ee003cd, entries=150, sequenceid=236, filesize=11.9 K 2024-12-13T21:33:06,143 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 836c7ebcedb8aba974e5bf30d5802cfc in 1251ms, sequenceid=236, compaction requested=false 2024-12-13T21:33:06,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:06,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:06,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-13T21:33:06,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-13T21:33:06,149 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-13T21:33:06,149 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8760 sec 2024-12-13T21:33:06,150 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.8800 sec 2024-12-13T21:33:06,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:06,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:33:06,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:06,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:06,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:06,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:06,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:06,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:06,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/69199e7d8cbd472cbc71cc91965e47a9 is 50, key is test_row_0/A:col10/1734125585096/Put/seqid=0 2024-12-13T21:33:06,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742336_1512 (size=14541) 2024-12-13T21:33:06,228 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/69199e7d8cbd472cbc71cc91965e47a9 2024-12-13T21:33:06,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/7b2f7d133dac4fda91166bb9d943ec5d is 50, key is test_row_0/B:col10/1734125585096/Put/seqid=0 2024-12-13T21:33:06,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to 
blk_1073742337_1513 (size=12151) 2024-12-13T21:33:06,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125646259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125646260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125646260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125646282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125646282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,287 DEBUG [Thread-2054 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:33:06,287 DEBUG [Thread-2058 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:33:06,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125646365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125646366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125646366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-13T21:33:06,374 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-13T21:33:06,375 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:06,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-12-13T21:33:06,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-13T21:33:06,376 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:06,376 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:06,377 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:06,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-13T21:33:06,528 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-13T21:33:06,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:06,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:06,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:06,528 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:06,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
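The pid=145 failures above ("NOT flushing ... as already flushing" followed by "Unable to complete flush") happen because the region is already mid-flush when the remote flush procedure arrives, so the callable reports failure and the master re-dispatches it later. A simplified, illustrative guard showing that pattern; the class and method names here are hypothetical and this is not the actual HRegion/FlushRegionCallable code:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

class RegionFlushGuard {
    private final AtomicBoolean flushing = new AtomicBoolean(false);

    // Runs the flush only if no other flush is in progress; otherwise fails
    // so the caller (here, the remote procedure) can be retried later.
    void flushOrFail(Runnable doFlush) throws IOException {
        if (!flushing.compareAndSet(false, true)) {
            throw new IOException("Unable to complete flush: already flushing");
        }
        try {
            doFlush.run();
        } finally {
            flushing.set(false);
        }
    }
}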
2024-12-13T21:33:06,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:06,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125646568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125646570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125646570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/7b2f7d133dac4fda91166bb9d943ec5d 2024-12-13T21:33:06,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/a438c7324b4a4d20a9135c320d809a85 is 50, key is test_row_0/C:col10/1734125585096/Put/seqid=0 2024-12-13T21:33:06,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742338_1514 (size=12151) 2024-12-13T21:33:06,672 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/a438c7324b4a4d20a9135c320d809a85 2024-12-13T21:33:06,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/69199e7d8cbd472cbc71cc91965e47a9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/69199e7d8cbd472cbc71cc91965e47a9 2024-12-13T21:33:06,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-13T21:33:06,680 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote 
procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-13T21:33:06,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/69199e7d8cbd472cbc71cc91965e47a9, entries=200, sequenceid=249, filesize=14.2 K 2024-12-13T21:33:06,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/7b2f7d133dac4fda91166bb9d943ec5d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/7b2f7d133dac4fda91166bb9d943ec5d 2024-12-13T21:33:06,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/7b2f7d133dac4fda91166bb9d943ec5d, entries=150, sequenceid=249, filesize=11.9 K 2024-12-13T21:33:06,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/a438c7324b4a4d20a9135c320d809a85 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/a438c7324b4a4d20a9135c320d809a85 2024-12-13T21:33:06,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:06,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:06,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:06,687 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:06,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:06,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:06,693 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/a438c7324b4a4d20a9135c320d809a85, entries=150, sequenceid=249, filesize=11.9 K 2024-12-13T21:33:06,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 836c7ebcedb8aba974e5bf30d5802cfc in 475ms, sequenceid=249, compaction requested=true 2024-12-13T21:33:06,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:06,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:06,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:06,694 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:06,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:06,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:06,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:06,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-13T21:33:06,694 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:06,695 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39355 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:06,695 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/A is initiating minor compaction (all files) 
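After the flush completes above, each store holds three eligible files and the ExploringCompactionPolicy entries report selecting all three (e.g. "3 files of size 39355 ... 1 permutations"). A rough sketch of ratio-based file selection in that spirit; the scoring rule and thresholds are simplified assumptions, not the real HBase policy:

import java.util.ArrayList;
import java.util.List;

class CompactionSelectionSketch {
    // Pick the largest contiguous group of store files in which no single file
    // dominates the rest by more than the given ratio.
    static List<Long> selectFiles(List<Long> fileSizes, double ratio, int minFiles) {
        List<Long> best = new ArrayList<>();
        for (int start = 0; start + minFiles <= fileSizes.size(); start++) {
            for (int end = start + minFiles; end <= fileSizes.size(); end++) {
                List<Long> candidate = fileSizes.subList(start, end);
                long total = candidate.stream().mapToLong(Long::longValue).sum();
                long largest = candidate.stream().mapToLong(Long::longValue).max().orElse(0L);
                if (largest <= (total - largest) * ratio && candidate.size() > best.size()) {
                    best = new ArrayList<>(candidate);
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Roughly the three A-store file sizes from the log (12.4 K, 11.9 K, 14.2 K).
        List<Long> sizes = List.of(12700L, 12200L, 14540L);
        System.out.println("Selected for compaction: " + selectFiles(sizes, 1.2, 3));
    }
}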
2024-12-13T21:33:06,695 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/A in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:06,695 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:06,695 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/B is initiating minor compaction (all files) 2024-12-13T21:33:06,695 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/efbfe7629a834596922f0fcf160127d0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/c48c7f0a886845d7864d0098a786b5e9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/69199e7d8cbd472cbc71cc91965e47a9] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=38.4 K 2024-12-13T21:33:06,695 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/B in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:06,695 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/715b3fc2f0c14c2690268ecfefd420e3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/e409525a3022478e9807c8b5f2052035, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/7b2f7d133dac4fda91166bb9d943ec5d] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=36.1 K 2024-12-13T21:33:06,695 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting efbfe7629a834596922f0fcf160127d0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1734125582128 2024-12-13T21:33:06,695 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 715b3fc2f0c14c2690268ecfefd420e3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1734125582128 2024-12-13T21:33:06,695 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting c48c7f0a886845d7864d0098a786b5e9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1734125583906 2024-12-13T21:33:06,696 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 
e409525a3022478e9807c8b5f2052035, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1734125583906 2024-12-13T21:33:06,696 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69199e7d8cbd472cbc71cc91965e47a9, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1734125585096 2024-12-13T21:33:06,696 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b2f7d133dac4fda91166bb9d943ec5d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1734125585096 2024-12-13T21:33:06,708 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#B#compaction#431 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:06,709 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/77e6e095370c47b6a287add5752c53c3 is 50, key is test_row_0/B:col10/1734125585096/Put/seqid=0 2024-12-13T21:33:06,711 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#A#compaction#432 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:06,711 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/68219fec449747fe871ed2e67c316181 is 50, key is test_row_0/A:col10/1734125585096/Put/seqid=0 2024-12-13T21:33:06,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742339_1515 (size=12765) 2024-12-13T21:33:06,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742340_1516 (size=12765) 2024-12-13T21:33:06,753 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/77e6e095370c47b6a287add5752c53c3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/77e6e095370c47b6a287add5752c53c3 2024-12-13T21:33:06,758 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/B of 836c7ebcedb8aba974e5bf30d5802cfc into 77e6e095370c47b6a287add5752c53c3(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
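The PressureAwareThroughputController entries above report the compaction's average throughput against a 50.00 MB/second limit and how long it slept to stay under it. An illustrative throttle with the same shape, tracking bytes written and sleeping off any excess; the bookkeeping here is an assumption, not HBase's controller:

class ThroughputThrottleSketch {
    private final long bytesPerSecondLimit;
    private final long windowStartNanos = System.nanoTime();
    private long bytesInWindow = 0;

    ThroughputThrottleSketch(long bytesPerSecondLimit) {
        this.bytesPerSecondLimit = bytesPerSecondLimit;
    }

    synchronized void control(long bytesJustWritten) throws InterruptedException {
        bytesInWindow += bytesJustWritten;
        double elapsedSeconds = (System.nanoTime() - windowStartNanos) / 1e9;
        double allowedSeconds = (double) bytesInWindow / bytesPerSecondLimit;
        if (allowedSeconds > elapsedSeconds) {
            // Writing faster than the limit: sleep off the excess
            // (the "slept N time(s)" accounting in the log entries above).
            Thread.sleep((long) ((allowedSeconds - elapsedSeconds) * 1000));
        }
    }
}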
2024-12-13T21:33:06,758 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:06,758 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/B, priority=13, startTime=1734125586694; duration=0sec 2024-12-13T21:33:06,758 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:06,758 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:B 2024-12-13T21:33:06,758 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:06,760 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:06,760 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/C is initiating minor compaction (all files) 2024-12-13T21:33:06,760 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/C in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:06,760 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/4ee1daa1d9e748bca7a0ba385a1d100f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/265d6548271b4409ae0f8dd90ee003cd, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/a438c7324b4a4d20a9135c320d809a85] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=36.1 K 2024-12-13T21:33:06,761 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ee1daa1d9e748bca7a0ba385a1d100f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1734125582128 2024-12-13T21:33:06,761 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 265d6548271b4409ae0f8dd90ee003cd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1734125583906 2024-12-13T21:33:06,761 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a438c7324b4a4d20a9135c320d809a85, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1734125585096 2024-12-13T21:33:06,768 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
836c7ebcedb8aba974e5bf30d5802cfc#C#compaction#433 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:06,769 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/210ace817ad44acc8a9a2d4ea16e25cb is 50, key is test_row_0/C:col10/1734125585096/Put/seqid=0 2024-12-13T21:33:06,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742341_1517 (size=12765) 2024-12-13T21:33:06,842 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,842 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-13T21:33:06,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:06,843 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-13T21:33:06,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:06,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:06,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:06,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:06,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:06,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:06,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/169e49c8aa424bd6aa2a1794e677d863 is 50, key is test_row_0/A:col10/1734125586242/Put/seqid=0 2024-12-13T21:33:06,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742342_1518 (size=12301) 2024-12-13T21:33:06,874 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:06,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:06,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125646884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125646886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125646887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-13T21:33:06,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125646989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125646992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:06,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:06,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125646993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:07,142 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/68219fec449747fe871ed2e67c316181 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/68219fec449747fe871ed2e67c316181 2024-12-13T21:33:07,145 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/A of 836c7ebcedb8aba974e5bf30d5802cfc into 68219fec449747fe871ed2e67c316181(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
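The recurring RegionTooBusyException entries ("Over memstore limit=512.0 K") mean writes to the region are rejected until the in-flight flush drains the memstore; the 512 K ceiling is presumably this test's small hbase.hregion.memstore.flush.size scaled by hbase.hregion.memstore.block.multiplier. A minimal client-side backoff sketch around a put; depending on client retry configuration the busy signal may surface wrapped in a retries-exhausted exception rather than directly, so treat this as illustrative only (row, family, and qualifier mirror the test_row_0/A:col10 cells seen in the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    // Region blocked on "Over memstore limit"; back off and let the flush catch up.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}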
2024-12-13T21:33:07,145 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:07,145 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/A, priority=13, startTime=1734125586693; duration=0sec 2024-12-13T21:33:07,146 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:07,146 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:A 2024-12-13T21:33:07,177 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/210ace817ad44acc8a9a2d4ea16e25cb as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/210ace817ad44acc8a9a2d4ea16e25cb 2024-12-13T21:33:07,181 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/C of 836c7ebcedb8aba974e5bf30d5802cfc into 210ace817ad44acc8a9a2d4ea16e25cb(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:07,181 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:07,181 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/C, priority=13, startTime=1734125586694; duration=0sec 2024-12-13T21:33:07,181 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:07,181 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:C 2024-12-13T21:33:07,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:07,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125647191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:07,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:07,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125647197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:07,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:07,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125647197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:07,252 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/169e49c8aa424bd6aa2a1794e677d863 2024-12-13T21:33:07,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/99409c043b744c0188c6fa8f8f8b619d is 50, key is test_row_0/B:col10/1734125586242/Put/seqid=0 2024-12-13T21:33:07,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742343_1519 (size=12301) 2024-12-13T21:33:07,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-13T21:33:07,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:07,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125647495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:07,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:07,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125647502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:07,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:07,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125647502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:07,665 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/99409c043b744c0188c6fa8f8f8b619d 2024-12-13T21:33:07,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/1d26710c4dd748d09304a79004cb5dd0 is 50, key is test_row_0/C:col10/1734125586242/Put/seqid=0 2024-12-13T21:33:07,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742344_1520 (size=12301) 2024-12-13T21:33:07,686 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/1d26710c4dd748d09304a79004cb5dd0 2024-12-13T21:33:07,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/169e49c8aa424bd6aa2a1794e677d863 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/169e49c8aa424bd6aa2a1794e677d863 2024-12-13T21:33:07,696 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/169e49c8aa424bd6aa2a1794e677d863, entries=150, sequenceid=274, filesize=12.0 K 2024-12-13T21:33:07,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/99409c043b744c0188c6fa8f8f8b619d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/99409c043b744c0188c6fa8f8f8b619d 2024-12-13T21:33:07,701 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/99409c043b744c0188c6fa8f8f8b619d, entries=150, sequenceid=274, filesize=12.0 K 2024-12-13T21:33:07,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/1d26710c4dd748d09304a79004cb5dd0 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/1d26710c4dd748d09304a79004cb5dd0 2024-12-13T21:33:07,708 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/1d26710c4dd748d09304a79004cb5dd0, entries=150, sequenceid=274, filesize=12.0 K 2024-12-13T21:33:07,709 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 836c7ebcedb8aba974e5bf30d5802cfc in 866ms, sequenceid=274, compaction requested=false 2024-12-13T21:33:07,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:07,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
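The table-wide flush completing here is driven by a master-side FlushTableProcedure (pid=144, with one FlushRegionProcedure per region, pid=145), whose completion and client acknowledgement follow just below. For context only, such a flush can be requested through the standard HBase Admin API; the following is a minimal sketch assuming nothing beyond the stock client API and the table name visible in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Asks the master to flush every region of the table; the master then runs a
            // FlushTableProcedure with per-region FlushRegionProcedure subprocedures,
            // which is the sequence recorded in the surrounding log entries.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}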
2024-12-13T21:33:07,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-13T21:33:07,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-13T21:33:07,711 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-13T21:33:07,711 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3330 sec 2024-12-13T21:33:07,713 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 1.3360 sec 2024-12-13T21:33:08,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:08,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-13T21:33:08,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:08,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:08,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:08,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:08,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:08,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:08,020 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/1f384051af9b422caa76b3962f4253d7 is 50, key is test_row_0/A:col10/1734125586886/Put/seqid=0 2024-12-13T21:33:08,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125648053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125648057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125648059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742345_1521 (size=12301) 2024-12-13T21:33:08,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/1f384051af9b422caa76b3962f4253d7 2024-12-13T21:33:08,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/c4a7aa0fd8544401871a0d84aeb4b0c4 is 50, key is test_row_0/B:col10/1734125586886/Put/seqid=0 2024-12-13T21:33:08,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742346_1522 (size=12301) 2024-12-13T21:33:08,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/c4a7aa0fd8544401871a0d84aeb4b0c4 2024-12-13T21:33:08,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/88761227d2b74e54a4174356d96a6b6e is 50, key is test_row_0/C:col10/1734125586886/Put/seqid=0 2024-12-13T21:33:08,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742347_1523 (size=12301) 2024-12-13T21:33:08,155 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=289 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/88761227d2b74e54a4174356d96a6b6e 2024-12-13T21:33:08,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/1f384051af9b422caa76b3962f4253d7 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1f384051af9b422caa76b3962f4253d7 2024-12-13T21:33:08,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1f384051af9b422caa76b3962f4253d7, entries=150, sequenceid=289, filesize=12.0 K 2024-12-13T21:33:08,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/c4a7aa0fd8544401871a0d84aeb4b0c4 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/c4a7aa0fd8544401871a0d84aeb4b0c4 2024-12-13T21:33:08,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/c4a7aa0fd8544401871a0d84aeb4b0c4, entries=150, sequenceid=289, filesize=12.0 K 2024-12-13T21:33:08,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/88761227d2b74e54a4174356d96a6b6e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/88761227d2b74e54a4174356d96a6b6e 2024-12-13T21:33:08,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125648165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125648165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125648166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,175 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/88761227d2b74e54a4174356d96a6b6e, entries=150, sequenceid=289, filesize=12.0 K 2024-12-13T21:33:08,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 836c7ebcedb8aba974e5bf30d5802cfc in 171ms, sequenceid=289, compaction requested=true 2024-12-13T21:33:08,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:08,176 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:08,177 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:08,177 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/A is initiating minor compaction (all files) 2024-12-13T21:33:08,177 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/A in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
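The RegionTooBusyException warnings above are returned to the writers while the region's memstore sits over its blocking limit. A minimal client-side sketch of the write pattern these flushes show (row test_row_0, families A/B/C, qualifier col10) with an explicit back-off; note this is illustrative only, since the stock HBase client normally retries RegionTooBusyException internally according to hbase.client.retries.number and hbase.client.pause:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Same row layout the flushes above record: families A, B, C, qualifier col10.
            for (String family : new String[] {"A", "B", "C"}) {
                put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            }
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (RegionTooBusyException busy) {
                    // The memstore is over its blocking limit (512.0 K in this log);
                    // back off and let the in-flight flush drain it before retrying.
                    if (attempt >= 5) throw busy;
                    Thread.sleep(100L * attempt);
                }
            }
        }
    }
}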
2024-12-13T21:33:08,177 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/68219fec449747fe871ed2e67c316181, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/169e49c8aa424bd6aa2a1794e677d863, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1f384051af9b422caa76b3962f4253d7] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=36.5 K 2024-12-13T21:33:08,177 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68219fec449747fe871ed2e67c316181, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1734125585096 2024-12-13T21:33:08,178 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:08,178 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 169e49c8aa424bd6aa2a1794e677d863, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734125586242 2024-12-13T21:33:08,178 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f384051af9b422caa76b3962f4253d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734125586879 2024-12-13T21:33:08,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:08,181 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:08,182 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:08,183 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/B is initiating minor compaction (all files) 2024-12-13T21:33:08,183 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/B in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
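The selection messages above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") are governed by per-store file-count thresholds in the region server configuration. A minimal sketch of reading those standard properties; the default values shown are assumptions based on stock HBase 2.x, not values taken from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholdSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of store files before a minor compaction is considered
        // (the log shows 3 eligible files being compacted together per store).
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
        // Maximum number of store files compacted in a single pass.
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which further writes start to be blocked,
        // matching the "16 blocking" figure in the selection messages above.
        int blockingFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.printf("compaction.min=%d compaction.max=%d blockingStoreFiles=%d%n",
                minFiles, maxFiles, blockingFiles);
    }
}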
2024-12-13T21:33:08,183 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/77e6e095370c47b6a287add5752c53c3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/99409c043b744c0188c6fa8f8f8b619d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/c4a7aa0fd8544401871a0d84aeb4b0c4] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=36.5 K 2024-12-13T21:33:08,184 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 77e6e095370c47b6a287add5752c53c3, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1734125585096 2024-12-13T21:33:08,185 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 99409c043b744c0188c6fa8f8f8b619d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734125586242 2024-12-13T21:33:08,185 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting c4a7aa0fd8544401871a0d84aeb4b0c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734125586879 2024-12-13T21:33:08,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:08,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:08,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:08,190 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:08,195 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#A#compaction#440 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:08,196 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/397f16e6589c48cfa797872af3f1a69f is 50, key is test_row_0/A:col10/1734125586886/Put/seqid=0 2024-12-13T21:33:08,206 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#B#compaction#441 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:08,207 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/39f49c7105f04e8885c81c6315a018b3 is 50, key is test_row_0/B:col10/1734125586886/Put/seqid=0 2024-12-13T21:33:08,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742348_1524 (size=13017) 2024-12-13T21:33:08,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742349_1525 (size=13017) 2024-12-13T21:33:08,272 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/39f49c7105f04e8885c81c6315a018b3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/39f49c7105f04e8885c81c6315a018b3 2024-12-13T21:33:08,276 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/B of 836c7ebcedb8aba974e5bf30d5802cfc into 39f49c7105f04e8885c81c6315a018b3(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:08,276 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:08,276 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/B, priority=13, startTime=1734125588180; duration=0sec 2024-12-13T21:33:08,276 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:08,276 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:B 2024-12-13T21:33:08,276 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:08,278 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:08,278 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/C is initiating minor compaction (all files) 2024-12-13T21:33:08,278 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/C in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
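The "Over memstore limit=512.0 K" figure in the RegionTooBusyException messages throughout this section is the per-region blocking threshold, i.e. the memstore flush size multiplied by the block multiplier. A minimal sketch using the standard property names; the concrete numbers are assumptions rather than values read from this log (a 512 K limit would be consistent with, for example, a 128 K flush size and the default multiplier of 4):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush trigger in bytes; the stock default is 128 MB,
        // but a much smaller value has clearly been configured for this test run.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Updates are blocked with RegionTooBusyException once the memstore
        // reaches flushSize * multiplier.
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
    }
}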
2024-12-13T21:33:08,278 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/210ace817ad44acc8a9a2d4ea16e25cb, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/1d26710c4dd748d09304a79004cb5dd0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/88761227d2b74e54a4174356d96a6b6e] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=36.5 K 2024-12-13T21:33:08,279 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 210ace817ad44acc8a9a2d4ea16e25cb, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1734125585096 2024-12-13T21:33:08,279 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d26710c4dd748d09304a79004cb5dd0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734125586242 2024-12-13T21:33:08,280 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 88761227d2b74e54a4174356d96a6b6e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734125586879 2024-12-13T21:33:08,289 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#C#compaction#442 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:08,289 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/bb4da79422b647ca9d177f7238f7ed9b is 50, key is test_row_0/C:col10/1734125586886/Put/seqid=0 2024-12-13T21:33:08,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742350_1526 (size=13017) 2024-12-13T21:33:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:08,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-13T21:33:08,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:08,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:08,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:08,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:08,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:08,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:08,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/1b600dec213649a395f72b304387cbf1 is 50, key is test_row_0/A:col10/1734125588057/Put/seqid=0 2024-12-13T21:33:08,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742351_1527 (size=14741) 2024-12-13T21:33:08,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/1b600dec213649a395f72b304387cbf1 2024-12-13T21:33:08,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/be6e62f63ec14aa49d63bc02f6594698 is 50, key is test_row_0/B:col10/1734125588057/Put/seqid=0 2024-12-13T21:33:08,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125648386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742352_1528 (size=12301) 2024-12-13T21:33:08,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125648392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125648392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-13T21:33:08,482 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-13T21:33:08,483 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:08,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-12-13T21:33:08,484 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:08,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-13T21:33:08,484 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:08,484 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:08,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125648492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125648499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125648498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-13T21:33:08,635 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-13T21:33:08,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:08,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:08,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:08,636 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:08,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:08,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:08,656 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/397f16e6589c48cfa797872af3f1a69f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/397f16e6589c48cfa797872af3f1a69f 2024-12-13T21:33:08,659 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/A of 836c7ebcedb8aba974e5bf30d5802cfc into 397f16e6589c48cfa797872af3f1a69f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:08,659 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:08,659 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/A, priority=13, startTime=1734125588176; duration=0sec 2024-12-13T21:33:08,659 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:08,659 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:A 2024-12-13T21:33:08,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125648697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125648703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:08,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125648704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,716 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/bb4da79422b647ca9d177f7238f7ed9b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bb4da79422b647ca9d177f7238f7ed9b 2024-12-13T21:33:08,719 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/C of 836c7ebcedb8aba974e5bf30d5802cfc into bb4da79422b647ca9d177f7238f7ed9b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:08,719 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:08,719 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/C, priority=13, startTime=1734125588188; duration=0sec 2024-12-13T21:33:08,720 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:08,720 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:C 2024-12-13T21:33:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-13T21:33:08,788 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-13T21:33:08,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:08,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:08,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:08,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:08,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:08,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/be6e62f63ec14aa49d63bc02f6594698 2024-12-13T21:33:08,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/bc6632934bf04db2a8afd63200de06fb is 50, key is test_row_0/C:col10/1734125588057/Put/seqid=0 2024-12-13T21:33:08,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742353_1529 (size=12301) 2024-12-13T21:33:08,940 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:08,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-13T21:33:08,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:08,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:08,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:08,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:08,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:08,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:09,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125649001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125649011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125649012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-13T21:33:09,092 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-13T21:33:09,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:09,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:09,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:09,092 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:09,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:09,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:09,210 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/bc6632934bf04db2a8afd63200de06fb 2024-12-13T21:33:09,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/1b600dec213649a395f72b304387cbf1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1b600dec213649a395f72b304387cbf1 2024-12-13T21:33:09,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1b600dec213649a395f72b304387cbf1, entries=200, sequenceid=314, filesize=14.4 K 2024-12-13T21:33:09,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/be6e62f63ec14aa49d63bc02f6594698 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/be6e62f63ec14aa49d63bc02f6594698 2024-12-13T21:33:09,219 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/be6e62f63ec14aa49d63bc02f6594698, entries=150, sequenceid=314, filesize=12.0 K 2024-12-13T21:33:09,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/bc6632934bf04db2a8afd63200de06fb as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bc6632934bf04db2a8afd63200de06fb 2024-12-13T21:33:09,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bc6632934bf04db2a8afd63200de06fb, entries=150, sequenceid=314, filesize=12.0 K 2024-12-13T21:33:09,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 836c7ebcedb8aba974e5bf30d5802cfc in 848ms, sequenceid=314, compaction requested=false 2024-12-13T21:33:09,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:09,244 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=147 2024-12-13T21:33:09,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:09,245 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-13T21:33:09,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:09,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:09,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:09,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:09,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:09,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:09,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/add90e0a0de847629c2ba28b6dc1e7e6 is 50, key is test_row_0/A:col10/1734125588389/Put/seqid=0 2024-12-13T21:33:09,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742354_1530 (size=12301) 2024-12-13T21:33:09,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:09,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:09,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125649541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125649544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125649545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-13T21:33:09,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125649646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125649648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125649648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,663 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/add90e0a0de847629c2ba28b6dc1e7e6 2024-12-13T21:33:09,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/8403aec803e84c08b00458e15fdfc7cb is 50, key is test_row_0/B:col10/1734125588389/Put/seqid=0 2024-12-13T21:33:09,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742355_1531 (size=12301) 2024-12-13T21:33:09,672 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/8403aec803e84c08b00458e15fdfc7cb 2024-12-13T21:33:09,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/0255a566795f4137a6f1a1fe9505248c is 50, key is test_row_0/C:col10/1734125588389/Put/seqid=0 2024-12-13T21:33:09,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742356_1532 (size=12301) 2024-12-13T21:33:09,679 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/0255a566795f4137a6f1a1fe9505248c 2024-12-13T21:33:09,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/add90e0a0de847629c2ba28b6dc1e7e6 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/add90e0a0de847629c2ba28b6dc1e7e6 2024-12-13T21:33:09,685 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/add90e0a0de847629c2ba28b6dc1e7e6, entries=150, sequenceid=329, filesize=12.0 K 2024-12-13T21:33:09,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/8403aec803e84c08b00458e15fdfc7cb as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/8403aec803e84c08b00458e15fdfc7cb 2024-12-13T21:33:09,689 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/8403aec803e84c08b00458e15fdfc7cb, entries=150, sequenceid=329, filesize=12.0 K 2024-12-13T21:33:09,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/0255a566795f4137a6f1a1fe9505248c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/0255a566795f4137a6f1a1fe9505248c 2024-12-13T21:33:09,693 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/0255a566795f4137a6f1a1fe9505248c, entries=150, sequenceid=329, filesize=12.0 K 2024-12-13T21:33:09,693 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 836c7ebcedb8aba974e5bf30d5802cfc in 448ms, sequenceid=329, compaction requested=true 2024-12-13T21:33:09,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 
{event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:09,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:09,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-12-13T21:33:09,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-12-13T21:33:09,696 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-13T21:33:09,696 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2100 sec 2024-12-13T21:33:09,697 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 1.2130 sec 2024-12-13T21:33:09,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:09,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-13T21:33:09,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:09,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:09,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:09,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:09,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:09,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:09,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/e3cc3f1d90bf46d3b7f0e39308f96bb5 is 50, key is test_row_0/A:col10/1734125589544/Put/seqid=0 2024-12-13T21:33:09,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742357_1533 (size=14741) 2024-12-13T21:33:09,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125649869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125649869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125649870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125649975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125649975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:09,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:09,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125649975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:10,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:10,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:10,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125650179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:10,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125650180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:10,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:10,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125650180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:10,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/e3cc3f1d90bf46d3b7f0e39308f96bb5 2024-12-13T21:33:10,271 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/bab7c55acd2a44b89fbb1b3ad1c55185 is 50, key is test_row_0/B:col10/1734125589544/Put/seqid=0 2024-12-13T21:33:10,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742358_1534 (size=12301) 2024-12-13T21:33:10,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:10,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36324 deadline: 1734125650300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:10,304 DEBUG [Thread-2058 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:33:10,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:10,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36352 deadline: 1734125650304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:10,308 DEBUG [Thread-2054 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8168 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., hostname=fd052dae32be,38989,1734125418878, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:33:10,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:10,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125650486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:10,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:10,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125650487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:10,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:10,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125650487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:10,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-13T21:33:10,587 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-12-13T21:33:10,588 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:10,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-12-13T21:33:10,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-13T21:33:10,589 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:10,589 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:10,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:10,674 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/bab7c55acd2a44b89fbb1b3ad1c55185 2024-12-13T21:33:10,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/9e685de5a290495e97a3746d387d388e is 50, key is test_row_0/C:col10/1734125589544/Put/seqid=0 2024-12-13T21:33:10,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742359_1535 (size=12301) 
2024-12-13T21:33:10,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-13T21:33:10,740 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:10,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-13T21:33:10,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:10,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:10,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:10,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:10,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:10,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:10,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-13T21:33:10,893 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:10,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-13T21:33:10,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:10,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:10,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:10,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:10,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:10,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:10,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:10,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125650991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:10,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:10,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125650993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:11,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125650994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:11,045 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:11,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-13T21:33:11,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:11,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:11,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:11,046 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:11,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:11,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:11,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/9e685de5a290495e97a3746d387d388e 2024-12-13T21:33:11,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/e3cc3f1d90bf46d3b7f0e39308f96bb5 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/e3cc3f1d90bf46d3b7f0e39308f96bb5 2024-12-13T21:33:11,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/e3cc3f1d90bf46d3b7f0e39308f96bb5, entries=200, sequenceid=353, filesize=14.4 K 2024-12-13T21:33:11,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/bab7c55acd2a44b89fbb1b3ad1c55185 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/bab7c55acd2a44b89fbb1b3ad1c55185 2024-12-13T21:33:11,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/bab7c55acd2a44b89fbb1b3ad1c55185, entries=150, sequenceid=353, filesize=12.0 K 2024-12-13T21:33:11,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/9e685de5a290495e97a3746d387d388e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9e685de5a290495e97a3746d387d388e 2024-12-13T21:33:11,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9e685de5a290495e97a3746d387d388e, entries=150, sequenceid=353, filesize=12.0 K 2024-12-13T21:33:11,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 836c7ebcedb8aba974e5bf30d5802cfc in 1242ms, sequenceid=353, compaction requested=true 2024-12-13T21:33:11,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:11,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:11,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:11,094 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:33:11,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:11,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:11,094 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:33:11,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:11,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:11,095 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54800 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:33:11,095 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/A is initiating minor compaction (all files) 2024-12-13T21:33:11,095 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:33:11,095 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/B is initiating minor compaction (all files) 2024-12-13T21:33:11,095 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/A in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:11,095 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/B in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:11,095 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/397f16e6589c48cfa797872af3f1a69f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1b600dec213649a395f72b304387cbf1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/add90e0a0de847629c2ba28b6dc1e7e6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/e3cc3f1d90bf46d3b7f0e39308f96bb5] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=53.5 K 2024-12-13T21:33:11,095 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/39f49c7105f04e8885c81c6315a018b3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/be6e62f63ec14aa49d63bc02f6594698, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/8403aec803e84c08b00458e15fdfc7cb, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/bab7c55acd2a44b89fbb1b3ad1c55185] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=48.8 K 2024-12-13T21:33:11,095 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 397f16e6589c48cfa797872af3f1a69f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734125586879 2024-12-13T21:33:11,095 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39f49c7105f04e8885c81c6315a018b3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734125586879 2024-12-13T21:33:11,095 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b600dec213649a395f72b304387cbf1, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1734125588051 2024-12-13T21:33:11,095 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting be6e62f63ec14aa49d63bc02f6594698, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1734125588051 2024-12-13T21:33:11,096 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting add90e0a0de847629c2ba28b6dc1e7e6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734125588385 2024-12-13T21:33:11,096 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
8403aec803e84c08b00458e15fdfc7cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734125588385 2024-12-13T21:33:11,096 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting e3cc3f1d90bf46d3b7f0e39308f96bb5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734125589535 2024-12-13T21:33:11,096 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting bab7c55acd2a44b89fbb1b3ad1c55185, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734125589543 2024-12-13T21:33:11,101 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#B#compaction#453 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:11,102 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/159bcc3b0e2942a397a00d3df3f0fa7e is 50, key is test_row_0/B:col10/1734125589544/Put/seqid=0 2024-12-13T21:33:11,102 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#A#compaction#452 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:11,102 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/c9f975a5f37246e5a3bd3c99ccb2c560 is 50, key is test_row_0/A:col10/1734125589544/Put/seqid=0 2024-12-13T21:33:11,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742360_1536 (size=13153) 2024-12-13T21:33:11,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742361_1537 (size=13153) 2024-12-13T21:33:11,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-13T21:33:11,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:11,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-13T21:33:11,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:11,198 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:33:11,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:11,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:11,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:11,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:11,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:11,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:11,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/ad9d0abc443141b8a338cec0614890e9 is 50, key is test_row_0/A:col10/1734125589868/Put/seqid=0 2024-12-13T21:33:11,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742362_1538 (size=12301) 2024-12-13T21:33:11,510 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/c9f975a5f37246e5a3bd3c99ccb2c560 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/c9f975a5f37246e5a3bd3c99ccb2c560 2024-12-13T21:33:11,510 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/159bcc3b0e2942a397a00d3df3f0fa7e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/159bcc3b0e2942a397a00d3df3f0fa7e 2024-12-13T21:33:11,513 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/B of 836c7ebcedb8aba974e5bf30d5802cfc into 159bcc3b0e2942a397a00d3df3f0fa7e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:11,513 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/A of 836c7ebcedb8aba974e5bf30d5802cfc into c9f975a5f37246e5a3bd3c99ccb2c560(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:11,513 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:11,513 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:11,513 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/A, priority=12, startTime=1734125591094; duration=0sec 2024-12-13T21:33:11,513 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/B, priority=12, startTime=1734125591094; duration=0sec 2024-12-13T21:33:11,513 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:11,513 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:A 2024-12-13T21:33:11,513 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:11,513 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:B 2024-12-13T21:33:11,513 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-13T21:33:11,515 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-13T21:33:11,515 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/C is initiating minor compaction (all files) 2024-12-13T21:33:11,515 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/C in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:11,515 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bb4da79422b647ca9d177f7238f7ed9b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bc6632934bf04db2a8afd63200de06fb, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/0255a566795f4137a6f1a1fe9505248c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9e685de5a290495e97a3746d387d388e] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=48.8 K 2024-12-13T21:33:11,515 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting bb4da79422b647ca9d177f7238f7ed9b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734125586879 2024-12-13T21:33:11,515 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting bc6632934bf04db2a8afd63200de06fb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1734125588051 2024-12-13T21:33:11,516 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 0255a566795f4137a6f1a1fe9505248c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734125588385 2024-12-13T21:33:11,516 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e685de5a290495e97a3746d387d388e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734125589543 2024-12-13T21:33:11,522 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#C#compaction#455 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:11,522 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/9245eeb787c444d485356730c6962138 is 50, key is test_row_0/C:col10/1734125589544/Put/seqid=0 2024-12-13T21:33:11,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742363_1539 (size=13153) 2024-12-13T21:33:11,608 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/ad9d0abc443141b8a338cec0614890e9 2024-12-13T21:33:11,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/4ccdb0e5aa6e4d54b4f4c3fa7e697313 is 50, key is test_row_0/B:col10/1734125589868/Put/seqid=0 2024-12-13T21:33:11,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742364_1540 (size=12301) 2024-12-13T21:33:11,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-13T21:33:11,929 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/9245eeb787c444d485356730c6962138 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9245eeb787c444d485356730c6962138 2024-12-13T21:33:11,932 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/C of 836c7ebcedb8aba974e5bf30d5802cfc into 9245eeb787c444d485356730c6962138(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:11,932 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:11,932 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/C, priority=12, startTime=1734125591094; duration=0sec 2024-12-13T21:33:11,932 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:11,932 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:C 2024-12-13T21:33:11,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:11,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:12,016 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/4ccdb0e5aa6e4d54b4f4c3fa7e697313 2024-12-13T21:33:12,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/666a6d0778c546728bfd5223bd87c59e is 50, key is test_row_0/C:col10/1734125589868/Put/seqid=0 2024-12-13T21:33:12,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742365_1541 (size=12301) 2024-12-13T21:33:12,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125652025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125652026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125652027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,130 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125652128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125652130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125652133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125652332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125652332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125652337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,424 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/666a6d0778c546728bfd5223bd87c59e 2024-12-13T21:33:12,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/ad9d0abc443141b8a338cec0614890e9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/ad9d0abc443141b8a338cec0614890e9 2024-12-13T21:33:12,429 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/ad9d0abc443141b8a338cec0614890e9, entries=150, sequenceid=365, filesize=12.0 K 2024-12-13T21:33:12,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/4ccdb0e5aa6e4d54b4f4c3fa7e697313 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/4ccdb0e5aa6e4d54b4f4c3fa7e697313 2024-12-13T21:33:12,432 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/4ccdb0e5aa6e4d54b4f4c3fa7e697313, entries=150, sequenceid=365, filesize=12.0 K 2024-12-13T21:33:12,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/666a6d0778c546728bfd5223bd87c59e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/666a6d0778c546728bfd5223bd87c59e 2024-12-13T21:33:12,435 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/666a6d0778c546728bfd5223bd87c59e, entries=150, sequenceid=365, filesize=12.0 K 2024-12-13T21:33:12,435 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 836c7ebcedb8aba974e5bf30d5802cfc in 1237ms, sequenceid=365, compaction requested=false 2024-12-13T21:33:12,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:12,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:12,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-12-13T21:33:12,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-12-13T21:33:12,437 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-13T21:33:12,437 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8470 sec 2024-12-13T21:33:12,438 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.8490 sec 2024-12-13T21:33:12,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:12,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-13T21:33:12,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:12,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:12,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:12,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:12,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:12,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:12,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/9b59c4af2fca40ae8877cc9e04b18fe4 is 50, key is test_row_0/A:col10/1734125592637/Put/seqid=0 2024-12-13T21:33:12,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742366_1542 (size=17181) 2024-12-13T21:33:12,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/9b59c4af2fca40ae8877cc9e04b18fe4 2024-12-13T21:33:12,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/fb5aab9198834d298caa8f5a4c8c2d41 is 50, key is test_row_0/B:col10/1734125592637/Put/seqid=0 2024-12-13T21:33:12,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742367_1543 (size=12301) 2024-12-13T21:33:12,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125652650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125652650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125652655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-13T21:33:12,692 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-12-13T21:33:12,693 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:12,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees 2024-12-13T21:33:12,694 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:12,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-13T21:33:12,694 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:12,695 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:12,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125652756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125652757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125652762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-13T21:33:12,846 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-13T21:33:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:12,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:12,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:12,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:12,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125652960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125652960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:12,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125652966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-13T21:33:12,998 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:12,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-13T21:33:12,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:12,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:12,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:12,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
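Note: the repeated RegionTooBusyException entries above come from HRegion.checkResources(): once the region's memstore grows past its blocking limit, new mutations are rejected until a flush brings it back down, and the client is expected to retry. The 512.0 K limit in these messages is far below production defaults, which suggests the test deliberately shrinks the flush size to force this code path. In a stock deployment the blocking limit is the flush size multiplied by the block multiplier; the sketch below shows how such a small limit could plausibly be configured. The concrete values are assumptions for illustration, since the log only reports the resulting limit, not how it was set.

    // Hypothetical configuration sketch: reproduce a tiny memstore blocking limit
    // (512 K = flush.size * block.multiplier). Values are assumed, not taken from the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyMemstoreLimitConfig {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Flush each memstore at 128 KB instead of the default 128 MB.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // Block new writes once the memstore reaches 4 x flush size = 512 KB,
        // matching the "Over memstore limit=512.0 K" messages above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }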
2024-12-13T21:33:12,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:12,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:13,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/fb5aab9198834d298caa8f5a4c8c2d41 2024-12-13T21:33:13,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/fd4478a72cee4a64a1f80d4c88da8972 is 50, key is test_row_0/C:col10/1734125592637/Put/seqid=0 2024-12-13T21:33:13,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742368_1544 (size=12301) 2024-12-13T21:33:13,150 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:13,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-13T21:33:13,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:13,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:13,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:13,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
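Note: the pid=151 entries show the master-driven flush racing with the region server's own MemStoreFlusher. The dispatched FlushRegionCallable finds the region already flushing, reports "Unable to complete flush", and the master re-dispatches the subprocedure until the in-flight flush finishes, just as pid=149 eventually completed earlier. The flush itself was requested through the admin API ("Operation: FLUSH, Table Name: default:TestAcidGuarantees"); a minimal sketch of such a request is below, with the connection setup assumed and only the table name taken from the log.

    // Minimal sketch of an admin-triggered table flush like the one behind pid=150/151.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // In recent HBase versions this call waits for the flush procedure to finish.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }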
2024-12-13T21:33:13,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:13,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:13,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:13,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125653266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:13,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:13,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125653267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:13,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:13,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125653281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:13,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-13T21:33:13,303 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:13,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-13T21:33:13,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:13,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:13,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:13,303 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:13,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:13,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:13,455 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:13,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-13T21:33:13,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:13,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:13,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:13,455 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:13,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:13,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:13,463 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/fd4478a72cee4a64a1f80d4c88da8972 2024-12-13T21:33:13,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/9b59c4af2fca40ae8877cc9e04b18fe4 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/9b59c4af2fca40ae8877cc9e04b18fe4 2024-12-13T21:33:13,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/9b59c4af2fca40ae8877cc9e04b18fe4, entries=250, sequenceid=394, filesize=16.8 K 2024-12-13T21:33:13,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/fb5aab9198834d298caa8f5a4c8c2d41 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/fb5aab9198834d298caa8f5a4c8c2d41 2024-12-13T21:33:13,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/fb5aab9198834d298caa8f5a4c8c2d41, entries=150, sequenceid=394, filesize=12.0 K 2024-12-13T21:33:13,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/fd4478a72cee4a64a1f80d4c88da8972 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/fd4478a72cee4a64a1f80d4c88da8972 2024-12-13T21:33:13,474 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/fd4478a72cee4a64a1f80d4c88da8972, entries=150, sequenceid=394, filesize=12.0 K 2024-12-13T21:33:13,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 836c7ebcedb8aba974e5bf30d5802cfc in 837ms, sequenceid=394, compaction requested=true 2024-12-13T21:33:13,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:13,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:13,475 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-12-13T21:33:13,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:13,475 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:13,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:13,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:13,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:13,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:13,475 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42635 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:13,476 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/A is initiating minor compaction (all files) 2024-12-13T21:33:13,476 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/A in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:13,476 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/c9f975a5f37246e5a3bd3c99ccb2c560, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/ad9d0abc443141b8a338cec0614890e9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/9b59c4af2fca40ae8877cc9e04b18fe4] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=41.6 K 2024-12-13T21:33:13,476 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:13,476 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/B is initiating minor compaction (all files) 2024-12-13T21:33:13,476 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/B in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:13,476 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/159bcc3b0e2942a397a00d3df3f0fa7e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/4ccdb0e5aa6e4d54b4f4c3fa7e697313, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/fb5aab9198834d298caa8f5a4c8c2d41] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=36.9 K 2024-12-13T21:33:13,476 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9f975a5f37246e5a3bd3c99ccb2c560, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734125589543 2024-12-13T21:33:13,476 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 159bcc3b0e2942a397a00d3df3f0fa7e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734125589543 2024-12-13T21:33:13,477 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad9d0abc443141b8a338cec0614890e9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1734125589858 2024-12-13T21:33:13,477 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ccdb0e5aa6e4d54b4f4c3fa7e697313, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1734125589858 2024-12-13T21:33:13,477 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b59c4af2fca40ae8877cc9e04b18fe4, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1734125592018 2024-12-13T21:33:13,477 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting fb5aab9198834d298caa8f5a4c8c2d41, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1734125592018 2024-12-13T21:33:13,482 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#B#compaction#461 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:13,482 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#A#compaction#462 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:13,482 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/16705dd62c4d44b7bede2800ed834201 is 50, key is test_row_0/B:col10/1734125592637/Put/seqid=0 2024-12-13T21:33:13,483 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/2e68e69524124d4a85f544def1e538c2 is 50, key is test_row_0/A:col10/1734125592637/Put/seqid=0 2024-12-13T21:33:13,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742370_1546 (size=13255) 2024-12-13T21:33:13,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742369_1545 (size=13255) 2024-12-13T21:33:13,607 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:13,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-13T21:33:13,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:13,608 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-13T21:33:13,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:13,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:13,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:13,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:13,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:13,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:13,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/e070754b441d4562bd528ece77afdd7d is 50, key is test_row_0/A:col10/1734125592649/Put/seqid=0 2024-12-13T21:33:13,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742371_1547 (size=12301) 2024-12-13T21:33:13,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:13,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:13,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-13T21:33:13,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125653819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:13,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125653820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:13,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:13,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125653824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:13,894 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/16705dd62c4d44b7bede2800ed834201 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/16705dd62c4d44b7bede2800ed834201 2024-12-13T21:33:13,896 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/B of 836c7ebcedb8aba974e5bf30d5802cfc into 16705dd62c4d44b7bede2800ed834201(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:13,896 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:13,897 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/B, priority=13, startTime=1734125593475; duration=0sec 2024-12-13T21:33:13,897 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:13,897 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:B 2024-12-13T21:33:13,897 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:13,897 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:13,897 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/C is initiating minor compaction (all files) 2024-12-13T21:33:13,897 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/C in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:13,897 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9245eeb787c444d485356730c6962138, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/666a6d0778c546728bfd5223bd87c59e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/fd4478a72cee4a64a1f80d4c88da8972] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=36.9 K 2024-12-13T21:33:13,898 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 9245eeb787c444d485356730c6962138, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734125589543 2024-12-13T21:33:13,898 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 666a6d0778c546728bfd5223bd87c59e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1734125589858 2024-12-13T21:33:13,898 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting fd4478a72cee4a64a1f80d4c88da8972, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1734125592018 2024-12-13T21:33:13,903 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
836c7ebcedb8aba974e5bf30d5802cfc#C#compaction#464 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:13,903 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/fbf0ae49f49f4d598ce1629d488dda6e is 50, key is test_row_0/C:col10/1734125592637/Put/seqid=0 2024-12-13T21:33:13,905 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/2e68e69524124d4a85f544def1e538c2 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2e68e69524124d4a85f544def1e538c2 2024-12-13T21:33:13,908 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/A of 836c7ebcedb8aba974e5bf30d5802cfc into 2e68e69524124d4a85f544def1e538c2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:13,908 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:13,908 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/A, priority=13, startTime=1734125593475; duration=0sec 2024-12-13T21:33:13,908 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:13,908 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:A 2024-12-13T21:33:13,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742372_1548 (size=13255) 2024-12-13T21:33:13,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:13,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125653925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:13,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:13,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125653925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:13,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:13,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125653930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:14,015 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/e070754b441d4562bd528ece77afdd7d 2024-12-13T21:33:14,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/87fcbb7f50044970bf9646ff545de194 is 50, key is test_row_0/B:col10/1734125592649/Put/seqid=0 2024-12-13T21:33:14,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742373_1549 (size=12301) 2024-12-13T21:33:14,024 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/87fcbb7f50044970bf9646ff545de194 2024-12-13T21:33:14,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/78cf95d4107f45b382618a65f5a0f955 is 50, key is test_row_0/C:col10/1734125592649/Put/seqid=0 2024-12-13T21:33:14,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742374_1550 (size=12301) 2024-12-13T21:33:14,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due 
to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:14,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125654129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:14,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:14,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125654130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:14,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:14,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125654137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:14,323 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/fbf0ae49f49f4d598ce1629d488dda6e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/fbf0ae49f49f4d598ce1629d488dda6e 2024-12-13T21:33:14,325 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/C of 836c7ebcedb8aba974e5bf30d5802cfc into fbf0ae49f49f4d598ce1629d488dda6e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:14,325 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:14,325 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/C, priority=13, startTime=1734125593475; duration=0sec 2024-12-13T21:33:14,326 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:14,326 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:C 2024-12-13T21:33:14,433 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/78cf95d4107f45b382618a65f5a0f955 2024-12-13T21:33:14,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:14,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125654433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:14,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/e070754b441d4562bd528ece77afdd7d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/e070754b441d4562bd528ece77afdd7d 2024-12-13T21:33:14,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:14,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125654435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:14,438 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/e070754b441d4562bd528ece77afdd7d, entries=150, sequenceid=404, filesize=12.0 K 2024-12-13T21:33:14,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/87fcbb7f50044970bf9646ff545de194 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/87fcbb7f50044970bf9646ff545de194 2024-12-13T21:33:14,442 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/87fcbb7f50044970bf9646ff545de194, entries=150, sequenceid=404, filesize=12.0 K 2024-12-13T21:33:14,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/78cf95d4107f45b382618a65f5a0f955 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/78cf95d4107f45b382618a65f5a0f955 2024-12-13T21:33:14,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:14,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125654440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:14,444 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/78cf95d4107f45b382618a65f5a0f955, entries=150, sequenceid=404, filesize=12.0 K 2024-12-13T21:33:14,445 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 836c7ebcedb8aba974e5bf30d5802cfc in 838ms, sequenceid=404, compaction requested=false 2024-12-13T21:33:14,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:14,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
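[editor's note] The repeated RegionTooBusyException entries above show region 836c7ebcedb8aba974e5bf30d5802cfc rejecting Mutate calls because its memstore has hit the 512.0 K blocking limit while the flush is still in flight. In HBase that blocking threshold is the per-region flush size multiplied by the block multiplier. The sketch below only illustrates how those two standard settings combine; the 128 KB flush size is a hypothetical, test-sized value chosen so the product matches the 512 K limit reported in this log, not a value read from the test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical test-sized values: 128 KB flush size x multiplier 4 = 512 KB,
        // matching the "Over memstore limit=512.0 K" messages above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Writes to a region block above roughly "
            + (flushSize * multiplier) / 1024 + " K of memstore data");
    }
}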
2024-12-13T21:33:14,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=151 2024-12-13T21:33:14,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=151 2024-12-13T21:33:14,447 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-13T21:33:14,447 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7520 sec 2024-12-13T21:33:14,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees in 1.7540 sec 2024-12-13T21:33:14,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-13T21:33:14,798 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-12-13T21:33:14,799 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:14,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees 2024-12-13T21:33:14,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-13T21:33:14,800 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:14,800 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:14,800 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:14,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-13T21:33:14,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:14,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-13T21:33:14,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:14,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:14,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:14,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:14,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:14,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:14,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/1da60f948c354d4fbc60f2e4e6f336d5 is 50, key is test_row_0/A:col10/1734125593816/Put/seqid=0 2024-12-13T21:33:14,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742375_1551 (size=14741) 2024-12-13T21:33:14,951 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:14,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-13T21:33:14,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:14,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:14,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:14,952 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
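[editor's note] The master-driven flush above (pid=152 with subprocedure pid=153) was requested by the test client, recorded as "Operation: FLUSH, Table Name: default:TestAcidGuarantees"; the region server then answers "NOT flushing ... as already flushing" because MemStoreFlusher.0 is already writing the region out, so the remote FlushRegionCallable fails and the master re-dispatches it. A minimal sketch of how such a table-level flush is requested through the public client API follows; the connection settings are assumed to come from the ambient HBase configuration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        // Ask the master to flush every region of the table; the master runs a
        // FlushTableProcedure with a FlushRegionProcedure per region, as seen in the log.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}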
2024-12-13T21:33:14,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:14,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:14,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:14,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125654948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:14,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:14,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125654948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:14,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:14,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125654949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:15,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:15,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125655053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:15,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:15,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125655054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:15,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-13T21:33:15,103 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:15,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-13T21:33:15,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:15,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:15,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:15,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
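[editor's note] While the region stays over its memstore limit, each incoming Mutate call (callId 239, 241, 243, ... above) is answered with RegionTooBusyException as back-pressure. The HBase client normally absorbs these by retrying internally; the loop below is only a sketch of that back-pressure pattern for a caller that does see the exception surface, reusing the row, family, and qualifier names that appear in the log (test_row_0, A, col10). The written value and the retry limits are hypothetical.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoffSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            int attempt = 0;
            while (true) {
                try {
                    table.put(put);
                    break;
                } catch (IOException e) {           // RegionTooBusyException surfaces as an IOException
                    if (++attempt >= 5) throw e;    // arbitrary cap for the sketch
                    Thread.sleep(100L << attempt);  // simple exponential backoff
                }
            }
        }
    }
}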
2024-12-13T21:33:15,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,255 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:15,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-13T21:33:15,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:15,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:15,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:15,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:15,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125655257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:15,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:15,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125655258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:15,346 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/1da60f948c354d4fbc60f2e4e6f336d5 2024-12-13T21:33:15,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/5960f57915b148b9a5b4bb3b2352f8d8 is 50, key is test_row_0/B:col10/1734125593816/Put/seqid=0 2024-12-13T21:33:15,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742376_1552 (size=12301) 2024-12-13T21:33:15,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-13T21:33:15,407 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:15,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-13T21:33:15,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:15,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:15,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
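[editor's note] The flusher above writes each new HFile under the region's .tmp directory first ("Flushed memstore data size=53.67 KB ... to=.../.tmp/A/1da60f948c354d4fbc60f2e4e6f336d5"), and the earlier "Committing ... as ..." lines then move the finished file into the column-family directory. The fragment below only illustrates that write-to-temp-then-rename pattern on a Hadoop FileSystem; the paths are placeholders, not the real layout helpers HBase uses internally.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
    // Move a fully written temporary file into its final store directory with one
    // rename, so readers never observe a partially written file.
    static void commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
        Path dst = new Path(storeDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dst)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + dst);
        }
    }

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Placeholder paths shaped like the ones in the log.
        commit(fs,
            new Path("/data/default/TestAcidGuarantees/region/.tmp/A/hfile"),
            new Path("/data/default/TestAcidGuarantees/region/A"));
    }
}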
2024-12-13T21:33:15,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,559 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:15,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-13T21:33:15,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:15,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:15,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:15,560 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:15,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125655562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:15,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:15,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125655562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:15,649 DEBUG [Thread-2067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6dee2855 to 127.0.0.1:57927 2024-12-13T21:33:15,649 DEBUG [Thread-2067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:15,651 DEBUG [Thread-2065 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0657e1bf to 127.0.0.1:57927 2024-12-13T21:33:15,651 DEBUG [Thread-2065 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:15,652 DEBUG [Thread-2073 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x27861032 to 127.0.0.1:57927 2024-12-13T21:33:15,652 DEBUG [Thread-2073 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:15,654 DEBUG [Thread-2071 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x677030bd to 127.0.0.1:57927 2024-12-13T21:33:15,654 DEBUG [Thread-2071 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:15,655 DEBUG [Thread-2069 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x54e8a98a to 127.0.0.1:57927 2024-12-13T21:33:15,655 DEBUG [Thread-2069 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:15,712 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:15,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-13T21:33:15,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:15,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:15,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:15,712 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/5960f57915b148b9a5b4bb3b2352f8d8 2024-12-13T21:33:15,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/b09d42bd2a3c4e5f86fac5f03c675f03 is 50, key is test_row_0/C:col10/1734125593816/Put/seqid=0 2024-12-13T21:33:15,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742377_1553 (size=12301) 2024-12-13T21:33:15,865 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:15,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-13T21:33:15,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:15,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
as already flushing 2024-12-13T21:33:15,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:15,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:15,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-13T21:33:15,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:15,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36366 deadline: 1734125655956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:16,021 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:16,022 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-13T21:33:16,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:16,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:16,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:16,023 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:16,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:16,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:16,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:16,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36338 deadline: 1734125656067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:16,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:16,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36342 deadline: 1734125656069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:16,174 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/b09d42bd2a3c4e5f86fac5f03c675f03 2024-12-13T21:33:16,177 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:16,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-13T21:33:16,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:16,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:16,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:16,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:16,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:16,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:16,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/1da60f948c354d4fbc60f2e4e6f336d5 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1da60f948c354d4fbc60f2e4e6f336d5 2024-12-13T21:33:16,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1da60f948c354d4fbc60f2e4e6f336d5, entries=200, sequenceid=434, filesize=14.4 K 2024-12-13T21:33:16,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/5960f57915b148b9a5b4bb3b2352f8d8 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/5960f57915b148b9a5b4bb3b2352f8d8 2024-12-13T21:33:16,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/5960f57915b148b9a5b4bb3b2352f8d8, entries=150, sequenceid=434, filesize=12.0 K 2024-12-13T21:33:16,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/b09d42bd2a3c4e5f86fac5f03c675f03 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/b09d42bd2a3c4e5f86fac5f03c675f03 2024-12-13T21:33:16,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/b09d42bd2a3c4e5f86fac5f03c675f03, entries=150, sequenceid=434, filesize=12.0 K 2024-12-13T21:33:16,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 836c7ebcedb8aba974e5bf30d5802cfc in 1252ms, sequenceid=434, compaction requested=true 2024-12-13T21:33:16,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:16,192 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:16,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:16,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:16,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:16,192 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:16,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 836c7ebcedb8aba974e5bf30d5802cfc:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:16,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:16,192 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:16,193 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40297 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:16,193 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:16,193 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/B is initiating minor compaction (all files) 2024-12-13T21:33:16,193 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/A is initiating minor compaction (all files) 2024-12-13T21:33:16,193 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/B in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:16,193 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/A in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:16,193 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/16705dd62c4d44b7bede2800ed834201, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/87fcbb7f50044970bf9646ff545de194, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/5960f57915b148b9a5b4bb3b2352f8d8] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=37.0 K 2024-12-13T21:33:16,193 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2e68e69524124d4a85f544def1e538c2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/e070754b441d4562bd528ece77afdd7d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1da60f948c354d4fbc60f2e4e6f336d5] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=39.4 K 2024-12-13T21:33:16,193 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16705dd62c4d44b7bede2800ed834201, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1734125592018 2024-12-13T21:33:16,193 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e68e69524124d4a85f544def1e538c2, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1734125592018 2024-12-13T21:33:16,194 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting e070754b441d4562bd528ece77afdd7d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1734125592645 2024-12-13T21:33:16,194 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87fcbb7f50044970bf9646ff545de194, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1734125592645 2024-12-13T21:33:16,194 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 1da60f948c354d4fbc60f2e4e6f336d5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1734125593816 2024-12-13T21:33:16,194 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5960f57915b148b9a5b4bb3b2352f8d8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1734125593816 2024-12-13T21:33:16,198 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#B#compaction#470 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:16,198 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/218294e98cc244379bd2da159091f347 is 50, key is test_row_0/B:col10/1734125593816/Put/seqid=0 2024-12-13T21:33:16,199 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#A#compaction#471 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:16,199 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/013e1fd35575494c871f7c17a354944c is 50, key is test_row_0/A:col10/1734125593816/Put/seqid=0 2024-12-13T21:33:16,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742378_1554 (size=13357) 2024-12-13T21:33:16,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742379_1555 (size=13357) 2024-12-13T21:33:16,332 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:16,333 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-13T21:33:16,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
2024-12-13T21:33:16,333 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-13T21:33:16,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:16,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:16,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:16,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:16,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:16,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:16,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/156eb075f5c6410cbb825a50e42731ea is 50, key is test_row_1/A:col10/1734125594947/Put/seqid=0 2024-12-13T21:33:16,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742380_1556 (size=9857) 2024-12-13T21:33:16,612 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/218294e98cc244379bd2da159091f347 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/218294e98cc244379bd2da159091f347 2024-12-13T21:33:16,614 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/013e1fd35575494c871f7c17a354944c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/013e1fd35575494c871f7c17a354944c 2024-12-13T21:33:16,617 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/B of 836c7ebcedb8aba974e5bf30d5802cfc into 218294e98cc244379bd2da159091f347(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:16,617 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:16,617 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/B, priority=13, startTime=1734125596192; duration=0sec 2024-12-13T21:33:16,617 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:16,617 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:B 2024-12-13T21:33:16,617 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:16,618 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:16,618 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): 836c7ebcedb8aba974e5bf30d5802cfc/C is initiating minor compaction (all files) 2024-12-13T21:33:16,618 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 836c7ebcedb8aba974e5bf30d5802cfc/C in TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:16,618 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/fbf0ae49f49f4d598ce1629d488dda6e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/78cf95d4107f45b382618a65f5a0f955, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/b09d42bd2a3c4e5f86fac5f03c675f03] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp, totalSize=37.0 K 2024-12-13T21:33:16,618 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/A of 836c7ebcedb8aba974e5bf30d5802cfc into 013e1fd35575494c871f7c17a354944c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:16,618 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:16,618 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/A, priority=13, startTime=1734125596192; duration=0sec 2024-12-13T21:33:16,618 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:16,618 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:A 2024-12-13T21:33:16,618 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting fbf0ae49f49f4d598ce1629d488dda6e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1734125592018 2024-12-13T21:33:16,619 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78cf95d4107f45b382618a65f5a0f955, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1734125592645 2024-12-13T21:33:16,619 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting b09d42bd2a3c4e5f86fac5f03c675f03, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1734125593816 2024-12-13T21:33:16,624 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 836c7ebcedb8aba974e5bf30d5802cfc#C#compaction#473 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:16,624 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/ab4c06d41ddc46fa85f86792c5b7fc19 is 50, key is test_row_0/C:col10/1734125593816/Put/seqid=0 2024-12-13T21:33:16,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742381_1557 (size=13357) 2024-12-13T21:33:16,746 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/156eb075f5c6410cbb825a50e42731ea 2024-12-13T21:33:16,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/4a84599e89334eb5afbc323e5ceb22e5 is 50, key is test_row_1/B:col10/1734125594947/Put/seqid=0 2024-12-13T21:33:16,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742382_1558 (size=9857) 2024-12-13T21:33:16,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-13T21:33:17,037 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/ab4c06d41ddc46fa85f86792c5b7fc19 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/ab4c06d41ddc46fa85f86792c5b7fc19 2024-12-13T21:33:17,042 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 836c7ebcedb8aba974e5bf30d5802cfc/C of 836c7ebcedb8aba974e5bf30d5802cfc into ab4c06d41ddc46fa85f86792c5b7fc19(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
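The cell keys written to the .tmp HFiles above (test_row_0/C:col10/... and test_row_1/B:col10/...) show the row and column layout the load generators use: rows named test_row_N, a single qualifier col10, and one cell per family A/B/C. A minimal client-side sketch of writing one such row follows; the value bytes, class name and connection setup are assumptions for illustration only, not taken from the test tool.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteTestRowSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // One cell per family, matching the key shape seen in the flushed files:
          // test_row_0/A:col10, test_row_0/B:col10, test_row_0/C:col10
          byte[] value = Bytes.toBytes("example-value"); // placeholder payload, not from the test
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
          table.put(put);
        }
      }
    }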
2024-12-13T21:33:17,042 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:17,042 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc., storeName=836c7ebcedb8aba974e5bf30d5802cfc/C, priority=13, startTime=1734125596192; duration=0sec 2024-12-13T21:33:17,042 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:17,042 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 836c7ebcedb8aba974e5bf30d5802cfc:C 2024-12-13T21:33:17,071 DEBUG [Thread-2056 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x134bfe32 to 127.0.0.1:57927 2024-12-13T21:33:17,071 DEBUG [Thread-2056 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:17,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:17,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. as already flushing 2024-12-13T21:33:17,076 DEBUG [Thread-2060 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x646ca555 to 127.0.0.1:57927 2024-12-13T21:33:17,076 DEBUG [Thread-2060 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:17,164 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/4a84599e89334eb5afbc323e5ceb22e5 2024-12-13T21:33:17,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/6f957646b78249cfa61278a3d96669ae is 50, key is test_row_1/C:col10/1734125594947/Put/seqid=0 2024-12-13T21:33:17,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742383_1559 (size=9857) 2024-12-13T21:33:17,355 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
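The flush that completes in the following records runs on the master as FlushTableProcedure pid=152 with a FlushRegionProcedure subprocedure pid=153, and the client later sees "Operation: FLUSH ... procId: 152 completed". From the client side the whole thing is a single Admin.flush call; a minimal sketch is shown below, assuming a default hbase-site.xml on the classpath (the class name is hypothetical).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flush every region of the table; the master runs the flush as a
          // procedure pair like pid=152/153 in the surrounding records.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }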
2024-12-13T21:33:17,583 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=443 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/6f957646b78249cfa61278a3d96669ae 2024-12-13T21:33:17,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/156eb075f5c6410cbb825a50e42731ea as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/156eb075f5c6410cbb825a50e42731ea 2024-12-13T21:33:17,596 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/156eb075f5c6410cbb825a50e42731ea, entries=100, sequenceid=443, filesize=9.6 K 2024-12-13T21:33:17,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/4a84599e89334eb5afbc323e5ceb22e5 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/4a84599e89334eb5afbc323e5ceb22e5 2024-12-13T21:33:17,600 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/4a84599e89334eb5afbc323e5ceb22e5, entries=100, sequenceid=443, filesize=9.6 K 2024-12-13T21:33:17,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/6f957646b78249cfa61278a3d96669ae as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/6f957646b78249cfa61278a3d96669ae 2024-12-13T21:33:17,605 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/6f957646b78249cfa61278a3d96669ae, entries=100, sequenceid=443, filesize=9.6 K 2024-12-13T21:33:17,606 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=13.42 KB/13740 for 836c7ebcedb8aba974e5bf30d5802cfc in 1272ms, 
sequenceid=443, compaction requested=false 2024-12-13T21:33:17,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:17,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:17,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-13T21:33:17,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-13T21:33:17,608 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-13T21:33:17,609 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8070 sec 2024-12-13T21:33:17,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees in 2.8100 sec 2024-12-13T21:33:17,981 DEBUG [Thread-2062 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x224e54da to 127.0.0.1:57927 2024-12-13T21:33:17,981 DEBUG [Thread-2062 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:18,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-13T21:33:18,908 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-12-13T21:33:20,374 DEBUG [Thread-2058 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17b55f2f to 127.0.0.1:57927 2024-12-13T21:33:20,374 DEBUG [Thread-2058 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:20,384 DEBUG [Thread-2054 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d125972 to 127.0.0.1:57927 2024-12-13T21:33:20,385 DEBUG [Thread-2054 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:20,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-13T21:33:20,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 28 2024-12-13T21:33:20,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 102 2024-12-13T21:33:20,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 36 2024-12-13T21:33:20,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 98 2024-12-13T21:33:20,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 87 2024-12-13T21:33:20,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-13T21:33:20,386 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-13T21:33:20,386 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3095 2024-12-13T21:33:20,386 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9285 rows 2024-12-13T21:33:20,386 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3088 2024-12-13T21:33:20,386 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9264 rows 2024-12-13T21:33:20,386 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3102 2024-12-13T21:33:20,386 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9306 rows 2024-12-13T21:33:20,386 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3090 2024-12-13T21:33:20,386 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9270 rows 2024-12-13T21:33:20,386 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3104 2024-12-13T21:33:20,386 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9312 rows 2024-12-13T21:33:20,386 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-13T21:33:20,386 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x315a23ef to 127.0.0.1:57927 2024-12-13T21:33:20,386 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:20,390 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-13T21:33:20,390 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-12-13T21:33:20,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:20,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-13T21:33:20,394 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125600393"}]},"ts":"1734125600393"} 2024-12-13T21:33:20,395 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-13T21:33:20,434 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-13T21:33:20,435 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-13T21:33:20,438 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=836c7ebcedb8aba974e5bf30d5802cfc, UNASSIGN}] 2024-12-13T21:33:20,439 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=836c7ebcedb8aba974e5bf30d5802cfc, UNASSIGN 2024-12-13T21:33:20,440 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=836c7ebcedb8aba974e5bf30d5802cfc, regionState=CLOSING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:33:20,441 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T21:33:20,441 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; CloseRegionProcedure 836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:33:20,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-13T21:33:20,594 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:20,595 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(124): Close 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:20,595 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T21:33:20,595 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1681): Closing 836c7ebcedb8aba974e5bf30d5802cfc, disabling compactions & flushes 2024-12-13T21:33:20,596 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:20,596 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 2024-12-13T21:33:20,596 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. after waiting 0 ms 2024-12-13T21:33:20,596 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
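The disable issued by the client ("Client=jenkins//172.17.0.3 disable TestAcidGuarantees") expands into the DisableTableProcedure (pid=154) → CloseTableRegionsProcedure (pid=155) → TransitRegionStateProcedure (pid=156) → CloseRegionProcedure (pid=157) chain seen above, which unassigns and closes region 836c7ebcedb8aba974e5bf30d5802cfc. On the client side this is a single Admin call; the sketch below is a hedged illustration (class name and connection setup assumed), not the test's own teardown code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.disableTable(table); // returns once the DisableTableProcedure has finished
          System.out.println("disabled: " + admin.isTableDisabled(table));
        }
      }
    }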
2024-12-13T21:33:20,596 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(2837): Flushing 836c7ebcedb8aba974e5bf30d5802cfc 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-13T21:33:20,597 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=A 2024-12-13T21:33:20,597 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:20,597 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=B 2024-12-13T21:33:20,597 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:20,598 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 836c7ebcedb8aba974e5bf30d5802cfc, store=C 2024-12-13T21:33:20,598 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:20,602 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/06abf4fe9ea54265a57f577da0973929 is 50, key is test_row_0/A:col10/1734125600371/Put/seqid=0 2024-12-13T21:33:20,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742384_1560 (size=12301) 2024-12-13T21:33:20,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-13T21:33:20,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-13T21:33:21,009 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/06abf4fe9ea54265a57f577da0973929 2024-12-13T21:33:21,022 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/830efa469c754d6a973dc800b1515f39 is 50, key is test_row_0/B:col10/1734125600371/Put/seqid=0 2024-12-13T21:33:21,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742385_1561 (size=12301) 2024-12-13T21:33:21,428 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 
{event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/830efa469c754d6a973dc800b1515f39 2024-12-13T21:33:21,432 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/96480ab08d584547aa07af92eb4b45bf is 50, key is test_row_0/C:col10/1734125600371/Put/seqid=0 2024-12-13T21:33:21,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742386_1562 (size=12301) 2024-12-13T21:33:21,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-13T21:33:21,837 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/96480ab08d584547aa07af92eb4b45bf 2024-12-13T21:33:21,846 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/A/06abf4fe9ea54265a57f577da0973929 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/06abf4fe9ea54265a57f577da0973929 2024-12-13T21:33:21,849 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/06abf4fe9ea54265a57f577da0973929, entries=150, sequenceid=454, filesize=12.0 K 2024-12-13T21:33:21,849 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/B/830efa469c754d6a973dc800b1515f39 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/830efa469c754d6a973dc800b1515f39 2024-12-13T21:33:21,851 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/830efa469c754d6a973dc800b1515f39, entries=150, sequenceid=454, filesize=12.0 K 2024-12-13T21:33:21,852 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/.tmp/C/96480ab08d584547aa07af92eb4b45bf as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/96480ab08d584547aa07af92eb4b45bf 2024-12-13T21:33:21,854 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/96480ab08d584547aa07af92eb4b45bf, entries=150, sequenceid=454, filesize=12.0 K 2024-12-13T21:33:21,854 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 836c7ebcedb8aba974e5bf30d5802cfc in 1258ms, sequenceid=454, compaction requested=true 2024-12-13T21:33:21,855 DEBUG [StoreCloser-TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/db64a71babba46be9c4a1700ed85ed28, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/90b27a70c6674030b0cc97680605b607, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/edd6b3b30bfd4622bedc218d34e9b9cd, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/8106d07a6b9546f7b999170fbaa1503d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/3d12c6ea22d94e3489194d1749f00fa4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/bbd8030bd05343b58580af341e9bccb4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/ae41547d1468433ebc4c2ad32008602a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/168e9671908643d980d143bb9f868303, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/d532fe3e60df4b6b99766420c2aa437b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/6944debc839c40b6bc2f42210aae0e6c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/22eecd283c7b4fdd99cea93d092b9cb6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2bcfbca5b2be471191de1bf2f230ec6a, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2ff55c735c514ed197dba8ca50c15d05, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/3e9b1cd2f7ce4661b6be7b507e1b3a2c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/efbfe7629a834596922f0fcf160127d0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/4f1919cc61a0455b940323a302488f3d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/c48c7f0a886845d7864d0098a786b5e9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/69199e7d8cbd472cbc71cc91965e47a9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/68219fec449747fe871ed2e67c316181, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/169e49c8aa424bd6aa2a1794e677d863, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/397f16e6589c48cfa797872af3f1a69f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1f384051af9b422caa76b3962f4253d7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1b600dec213649a395f72b304387cbf1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/add90e0a0de847629c2ba28b6dc1e7e6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/e3cc3f1d90bf46d3b7f0e39308f96bb5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/c9f975a5f37246e5a3bd3c99ccb2c560, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/ad9d0abc443141b8a338cec0614890e9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/9b59c4af2fca40ae8877cc9e04b18fe4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2e68e69524124d4a85f544def1e538c2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/e070754b441d4562bd528ece77afdd7d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1da60f948c354d4fbc60f2e4e6f336d5] to archive 2024-12-13T21:33:21,855 DEBUG 
[StoreCloser-TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-13T21:33:21,857 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/db64a71babba46be9c4a1700ed85ed28 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/db64a71babba46be9c4a1700ed85ed28 2024-12-13T21:33:21,857 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/90b27a70c6674030b0cc97680605b607 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/90b27a70c6674030b0cc97680605b607 2024-12-13T21:33:21,857 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/edd6b3b30bfd4622bedc218d34e9b9cd to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/edd6b3b30bfd4622bedc218d34e9b9cd 2024-12-13T21:33:21,857 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/8106d07a6b9546f7b999170fbaa1503d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/8106d07a6b9546f7b999170fbaa1503d 2024-12-13T21:33:21,857 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/3d12c6ea22d94e3489194d1749f00fa4 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/3d12c6ea22d94e3489194d1749f00fa4 2024-12-13T21:33:21,857 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/bbd8030bd05343b58580af341e9bccb4 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/bbd8030bd05343b58580af341e9bccb4 2024-12-13T21:33:21,858 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/ae41547d1468433ebc4c2ad32008602a to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/ae41547d1468433ebc4c2ad32008602a 2024-12-13T21:33:21,858 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/d532fe3e60df4b6b99766420c2aa437b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/d532fe3e60df4b6b99766420c2aa437b 2024-12-13T21:33:21,858 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/6944debc839c40b6bc2f42210aae0e6c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/6944debc839c40b6bc2f42210aae0e6c 2024-12-13T21:33:21,858 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/168e9671908643d980d143bb9f868303 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/168e9671908643d980d143bb9f868303 2024-12-13T21:33:21,858 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/22eecd283c7b4fdd99cea93d092b9cb6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/22eecd283c7b4fdd99cea93d092b9cb6 2024-12-13T21:33:21,858 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2bcfbca5b2be471191de1bf2f230ec6a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2bcfbca5b2be471191de1bf2f230ec6a 2024-12-13T21:33:21,859 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2ff55c735c514ed197dba8ca50c15d05 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2ff55c735c514ed197dba8ca50c15d05 2024-12-13T21:33:21,859 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/3e9b1cd2f7ce4661b6be7b507e1b3a2c to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/3e9b1cd2f7ce4661b6be7b507e1b3a2c 2024-12-13T21:33:21,859 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/efbfe7629a834596922f0fcf160127d0 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/efbfe7629a834596922f0fcf160127d0 2024-12-13T21:33:21,859 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/4f1919cc61a0455b940323a302488f3d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/4f1919cc61a0455b940323a302488f3d 2024-12-13T21:33:21,860 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/c48c7f0a886845d7864d0098a786b5e9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/c48c7f0a886845d7864d0098a786b5e9 2024-12-13T21:33:21,860 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/69199e7d8cbd472cbc71cc91965e47a9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/69199e7d8cbd472cbc71cc91965e47a9 2024-12-13T21:33:21,860 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/68219fec449747fe871ed2e67c316181 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/68219fec449747fe871ed2e67c316181 2024-12-13T21:33:21,860 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/169e49c8aa424bd6aa2a1794e677d863 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/169e49c8aa424bd6aa2a1794e677d863 2024-12-13T21:33:21,860 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/397f16e6589c48cfa797872af3f1a69f to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/397f16e6589c48cfa797872af3f1a69f 2024-12-13T21:33:21,860 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1f384051af9b422caa76b3962f4253d7 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1f384051af9b422caa76b3962f4253d7 2024-12-13T21:33:21,861 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/add90e0a0de847629c2ba28b6dc1e7e6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/add90e0a0de847629c2ba28b6dc1e7e6 2024-12-13T21:33:21,861 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1b600dec213649a395f72b304387cbf1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1b600dec213649a395f72b304387cbf1 2024-12-13T21:33:21,861 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/e3cc3f1d90bf46d3b7f0e39308f96bb5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/e3cc3f1d90bf46d3b7f0e39308f96bb5 2024-12-13T21:33:21,861 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/c9f975a5f37246e5a3bd3c99ccb2c560 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/c9f975a5f37246e5a3bd3c99ccb2c560 2024-12-13T21:33:21,861 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/ad9d0abc443141b8a338cec0614890e9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/ad9d0abc443141b8a338cec0614890e9 2024-12-13T21:33:21,861 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/9b59c4af2fca40ae8877cc9e04b18fe4 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/9b59c4af2fca40ae8877cc9e04b18fe4 2024-12-13T21:33:21,861 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2e68e69524124d4a85f544def1e538c2 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/2e68e69524124d4a85f544def1e538c2 2024-12-13T21:33:21,861 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/e070754b441d4562bd528ece77afdd7d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/e070754b441d4562bd528ece77afdd7d 2024-12-13T21:33:21,862 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1da60f948c354d4fbc60f2e4e6f336d5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/1da60f948c354d4fbc60f2e4e6f336d5 2024-12-13T21:33:21,862 DEBUG [StoreCloser-TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/7ee33d515e534470b484338305a9c66d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/0c9f9b152522496ab54bd8f3994e8c6d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/1dfe24aa2ccb4c3ab4c410888355acd9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/74c1c4fe6ccd437faa77cd6340ac22dc, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/d1f9fe7855bc4784b3a67fb7a250cfd8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/1ff3eff564904d48be7bfa7e979c5ca9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/edfee14373394051a15ec25b13baf36f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/71e93129c05a426893dd4f7a381e0002, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/ced203f34f604af1b975261e0bd5efde, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/cb5bf7cc3ccb48d7a7f6181e1bf4b645, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/3ce93b47b2cb45b3910b206465b9df81, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/dafa2d814a9e4987ab2a80e692cc7fe4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/16bbfa6ece6842a98b4ac52554e5e0e6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/8e0181c5856348cdac7fbf8dce971c4a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/715b3fc2f0c14c2690268ecfefd420e3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/81e7ac8717f04725b2adebb105b5706f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/e409525a3022478e9807c8b5f2052035, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/77e6e095370c47b6a287add5752c53c3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/7b2f7d133dac4fda91166bb9d943ec5d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/99409c043b744c0188c6fa8f8f8b619d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/39f49c7105f04e8885c81c6315a018b3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/c4a7aa0fd8544401871a0d84aeb4b0c4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/be6e62f63ec14aa49d63bc02f6594698, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/8403aec803e84c08b00458e15fdfc7cb, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/159bcc3b0e2942a397a00d3df3f0fa7e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/bab7c55acd2a44b89fbb1b3ad1c55185, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/4ccdb0e5aa6e4d54b4f4c3fa7e697313, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/16705dd62c4d44b7bede2800ed834201, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/fb5aab9198834d298caa8f5a4c8c2d41, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/87fcbb7f50044970bf9646ff545de194, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/5960f57915b148b9a5b4bb3b2352f8d8] to archive 2024-12-13T21:33:21,863 DEBUG [StoreCloser-TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-13T21:33:21,864 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/0c9f9b152522496ab54bd8f3994e8c6d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/0c9f9b152522496ab54bd8f3994e8c6d 2024-12-13T21:33:21,864 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/1dfe24aa2ccb4c3ab4c410888355acd9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/1dfe24aa2ccb4c3ab4c410888355acd9 2024-12-13T21:33:21,864 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/d1f9fe7855bc4784b3a67fb7a250cfd8 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/d1f9fe7855bc4784b3a67fb7a250cfd8 2024-12-13T21:33:21,864 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/7ee33d515e534470b484338305a9c66d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/7ee33d515e534470b484338305a9c66d 2024-12-13T21:33:21,864 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/1ff3eff564904d48be7bfa7e979c5ca9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/1ff3eff564904d48be7bfa7e979c5ca9 2024-12-13T21:33:21,865 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/71e93129c05a426893dd4f7a381e0002 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/71e93129c05a426893dd4f7a381e0002 2024-12-13T21:33:21,865 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/edfee14373394051a15ec25b13baf36f to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/edfee14373394051a15ec25b13baf36f 2024-12-13T21:33:21,865 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/74c1c4fe6ccd437faa77cd6340ac22dc to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/74c1c4fe6ccd437faa77cd6340ac22dc 2024-12-13T21:33:21,866 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/ced203f34f604af1b975261e0bd5efde to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/ced203f34f604af1b975261e0bd5efde 2024-12-13T21:33:21,866 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/3ce93b47b2cb45b3910b206465b9df81 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/3ce93b47b2cb45b3910b206465b9df81 2024-12-13T21:33:21,866 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/dafa2d814a9e4987ab2a80e692cc7fe4 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/dafa2d814a9e4987ab2a80e692cc7fe4 2024-12-13T21:33:21,866 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/16bbfa6ece6842a98b4ac52554e5e0e6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/16bbfa6ece6842a98b4ac52554e5e0e6 2024-12-13T21:33:21,866 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/8e0181c5856348cdac7fbf8dce971c4a to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/8e0181c5856348cdac7fbf8dce971c4a 2024-12-13T21:33:21,866 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/81e7ac8717f04725b2adebb105b5706f to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/81e7ac8717f04725b2adebb105b5706f 2024-12-13T21:33:21,866 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/cb5bf7cc3ccb48d7a7f6181e1bf4b645 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/cb5bf7cc3ccb48d7a7f6181e1bf4b645 2024-12-13T21:33:21,866 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/715b3fc2f0c14c2690268ecfefd420e3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/715b3fc2f0c14c2690268ecfefd420e3 2024-12-13T21:33:21,867 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/e409525a3022478e9807c8b5f2052035 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/e409525a3022478e9807c8b5f2052035 2024-12-13T21:33:21,867 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/99409c043b744c0188c6fa8f8f8b619d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/99409c043b744c0188c6fa8f8f8b619d 2024-12-13T21:33:21,867 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/7b2f7d133dac4fda91166bb9d943ec5d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/7b2f7d133dac4fda91166bb9d943ec5d 2024-12-13T21:33:21,867 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/be6e62f63ec14aa49d63bc02f6594698 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/be6e62f63ec14aa49d63bc02f6594698 2024-12-13T21:33:21,868 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/39f49c7105f04e8885c81c6315a018b3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/39f49c7105f04e8885c81c6315a018b3 2024-12-13T21:33:21,868 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/77e6e095370c47b6a287add5752c53c3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/77e6e095370c47b6a287add5752c53c3 2024-12-13T21:33:21,868 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/8403aec803e84c08b00458e15fdfc7cb to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/8403aec803e84c08b00458e15fdfc7cb 2024-12-13T21:33:21,868 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/c4a7aa0fd8544401871a0d84aeb4b0c4 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/c4a7aa0fd8544401871a0d84aeb4b0c4 2024-12-13T21:33:21,869 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/159bcc3b0e2942a397a00d3df3f0fa7e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/159bcc3b0e2942a397a00d3df3f0fa7e 2024-12-13T21:33:21,869 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/16705dd62c4d44b7bede2800ed834201 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/16705dd62c4d44b7bede2800ed834201 2024-12-13T21:33:21,869 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/4ccdb0e5aa6e4d54b4f4c3fa7e697313 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/4ccdb0e5aa6e4d54b4f4c3fa7e697313 2024-12-13T21:33:21,869 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/bab7c55acd2a44b89fbb1b3ad1c55185 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/bab7c55acd2a44b89fbb1b3ad1c55185 2024-12-13T21:33:21,869 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/87fcbb7f50044970bf9646ff545de194 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/87fcbb7f50044970bf9646ff545de194 2024-12-13T21:33:21,869 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/fb5aab9198834d298caa8f5a4c8c2d41 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/fb5aab9198834d298caa8f5a4c8c2d41 2024-12-13T21:33:21,869 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/5960f57915b148b9a5b4bb3b2352f8d8 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/5960f57915b148b9a5b4bb3b2352f8d8 2024-12-13T21:33:21,870 DEBUG [StoreCloser-TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/3fffd23dd442433cb394309394862c9b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bb588b7c77aa40f381eac5b6e5bb411e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/349b2ed789e04ff58269e1d60eb5199f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9f7b5b77720a44f5b4f38889869e412c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/436c499911164dc981f11efc2798f594, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/81fb736c78ae457da7ec8aa921618ac0, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/629f40bd07f64df3b58f5199325e1cda, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/433fd56d946945ba9cf6866effab08b9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/5691e17688844bf1ab144e1b44277e6d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/03122ed8ca934332a713e34db571a557, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/d3e3536ce6fd43c3af3a6d9fe6f44eaf, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/af220dc36f9c4f7faef30d1dc907aac0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/405c833c90a44d818b297f8190da7ed4, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/ea3da4b519e7416da3a241997b56a7a7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/4ee1daa1d9e748bca7a0ba385a1d100f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/eb88507695ca40a1962341857388556a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/265d6548271b4409ae0f8dd90ee003cd, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/210ace817ad44acc8a9a2d4ea16e25cb, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/a438c7324b4a4d20a9135c320d809a85, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/1d26710c4dd748d09304a79004cb5dd0, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bb4da79422b647ca9d177f7238f7ed9b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/88761227d2b74e54a4174356d96a6b6e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bc6632934bf04db2a8afd63200de06fb, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/0255a566795f4137a6f1a1fe9505248c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9245eeb787c444d485356730c6962138, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9e685de5a290495e97a3746d387d388e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/666a6d0778c546728bfd5223bd87c59e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/fbf0ae49f49f4d598ce1629d488dda6e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/fd4478a72cee4a64a1f80d4c88da8972, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/78cf95d4107f45b382618a65f5a0f955, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/b09d42bd2a3c4e5f86fac5f03c675f03] to archive 2024-12-13T21:33:21,870 DEBUG [StoreCloser-TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-13T21:33:21,872 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/3fffd23dd442433cb394309394862c9b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/3fffd23dd442433cb394309394862c9b 2024-12-13T21:33:21,872 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/349b2ed789e04ff58269e1d60eb5199f to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/349b2ed789e04ff58269e1d60eb5199f 2024-12-13T21:33:21,872 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bb588b7c77aa40f381eac5b6e5bb411e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bb588b7c77aa40f381eac5b6e5bb411e 2024-12-13T21:33:21,872 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/81fb736c78ae457da7ec8aa921618ac0 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/81fb736c78ae457da7ec8aa921618ac0 2024-12-13T21:33:21,872 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/436c499911164dc981f11efc2798f594 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/436c499911164dc981f11efc2798f594 2024-12-13T21:33:21,872 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9f7b5b77720a44f5b4f38889869e412c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9f7b5b77720a44f5b4f38889869e412c 2024-12-13T21:33:21,872 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/629f40bd07f64df3b58f5199325e1cda to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/629f40bd07f64df3b58f5199325e1cda 2024-12-13T21:33:21,873 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/5691e17688844bf1ab144e1b44277e6d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/5691e17688844bf1ab144e1b44277e6d 2024-12-13T21:33:21,873 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/d3e3536ce6fd43c3af3a6d9fe6f44eaf to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/d3e3536ce6fd43c3af3a6d9fe6f44eaf 2024-12-13T21:33:21,873 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/af220dc36f9c4f7faef30d1dc907aac0 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/af220dc36f9c4f7faef30d1dc907aac0 2024-12-13T21:33:21,873 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/03122ed8ca934332a713e34db571a557 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/03122ed8ca934332a713e34db571a557 2024-12-13T21:33:21,873 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/405c833c90a44d818b297f8190da7ed4 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/405c833c90a44d818b297f8190da7ed4 2024-12-13T21:33:21,873 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/ea3da4b519e7416da3a241997b56a7a7 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/ea3da4b519e7416da3a241997b56a7a7 2024-12-13T21:33:21,873 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/4ee1daa1d9e748bca7a0ba385a1d100f to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/4ee1daa1d9e748bca7a0ba385a1d100f 2024-12-13T21:33:21,874 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/eb88507695ca40a1962341857388556a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/eb88507695ca40a1962341857388556a 2024-12-13T21:33:21,874 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/a438c7324b4a4d20a9135c320d809a85 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/a438c7324b4a4d20a9135c320d809a85 2024-12-13T21:33:21,874 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/210ace817ad44acc8a9a2d4ea16e25cb to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/210ace817ad44acc8a9a2d4ea16e25cb 2024-12-13T21:33:21,874 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/265d6548271b4409ae0f8dd90ee003cd to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/265d6548271b4409ae0f8dd90ee003cd 2024-12-13T21:33:21,874 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/1d26710c4dd748d09304a79004cb5dd0 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/1d26710c4dd748d09304a79004cb5dd0 2024-12-13T21:33:21,874 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/88761227d2b74e54a4174356d96a6b6e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/88761227d2b74e54a4174356d96a6b6e 2024-12-13T21:33:21,874 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bb4da79422b647ca9d177f7238f7ed9b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bb4da79422b647ca9d177f7238f7ed9b 2024-12-13T21:33:21,875 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bc6632934bf04db2a8afd63200de06fb to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/bc6632934bf04db2a8afd63200de06fb 2024-12-13T21:33:21,875 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/666a6d0778c546728bfd5223bd87c59e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/666a6d0778c546728bfd5223bd87c59e 2024-12-13T21:33:21,875 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9245eeb787c444d485356730c6962138 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9245eeb787c444d485356730c6962138 2024-12-13T21:33:21,875 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/fbf0ae49f49f4d598ce1629d488dda6e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/fbf0ae49f49f4d598ce1629d488dda6e 2024-12-13T21:33:21,875 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/0255a566795f4137a6f1a1fe9505248c to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/0255a566795f4137a6f1a1fe9505248c 2024-12-13T21:33:21,875 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9e685de5a290495e97a3746d387d388e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/9e685de5a290495e97a3746d387d388e 2024-12-13T21:33:21,876 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/fd4478a72cee4a64a1f80d4c88da8972 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/fd4478a72cee4a64a1f80d4c88da8972 2024-12-13T21:33:21,877 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/433fd56d946945ba9cf6866effab08b9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/433fd56d946945ba9cf6866effab08b9 2024-12-13T21:33:21,877 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/78cf95d4107f45b382618a65f5a0f955 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/78cf95d4107f45b382618a65f5a0f955 2024-12-13T21:33:21,877 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/b09d42bd2a3c4e5f86fac5f03c675f03 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/b09d42bd2a3c4e5f86fac5f03c675f03 2024-12-13T21:33:21,880 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/recovered.edits/457.seqid, newMaxSeqId=457, maxSeqId=1 2024-12-13T21:33:21,881 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc. 
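Each "Archived from FileableStoreFile, <src> to <dst>" entry above records a compacted store file being moved out of the region's data/ tree into the parallel archive/ tree on the same HDFS instance. A minimal sketch of that kind of move with the plain Hadoop FileSystem API follows; the rename-based move is an illustrative assumption and not the actual HFileArchiver code, while the two paths are copied from the first archiver entry in this section.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Same namenode endpoint as in the log above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34065"), conf);

    // Source and destination taken verbatim from the HFileArchiver-10 entry.
    Path src = new Path("/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/"
        + "data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/0c9f9b152522496ab54bd8f3994e8c6d");
    Path dst = new Path("/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/"
        + "archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/0c9f9b152522496ab54bd8f3994e8c6d");

    // Ensure the archive directory exists, then move the store file into it.
    fs.mkdirs(dst.getParent());
    boolean moved = fs.rename(src, dst);
    System.out.println("moved=" + moved);
  }
}
```

Within a single HDFS filesystem a rename is a NameNode metadata operation rather than a data copy, which is consistent with the archiver clearing dozens of store files within a few milliseconds in the timestamps above.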
2024-12-13T21:33:21,881 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1635): Region close journal for 836c7ebcedb8aba974e5bf30d5802cfc: 2024-12-13T21:33:21,882 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(170): Closed 836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:21,882 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=836c7ebcedb8aba974e5bf30d5802cfc, regionState=CLOSED 2024-12-13T21:33:21,884 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-13T21:33:21,884 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; CloseRegionProcedure 836c7ebcedb8aba974e5bf30d5802cfc, server=fd052dae32be,38989,1734125418878 in 1.4420 sec 2024-12-13T21:33:21,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-13T21:33:21,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=836c7ebcedb8aba974e5bf30d5802cfc, UNASSIGN in 1.4470 sec 2024-12-13T21:33:21,886 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-12-13T21:33:21,886 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4500 sec 2024-12-13T21:33:21,887 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125601887"}]},"ts":"1734125601887"} 2024-12-13T21:33:21,887 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-13T21:33:21,934 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-13T21:33:21,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5450 sec 2024-12-13T21:33:22,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-13T21:33:22,502 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-12-13T21:33:22,502 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-12-13T21:33:22,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:22,504 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=158, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:22,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-13T21:33:22,505 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=158, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:22,506 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:22,510 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/recovered.edits] 2024-12-13T21:33:22,514 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/156eb075f5c6410cbb825a50e42731ea to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/156eb075f5c6410cbb825a50e42731ea 2024-12-13T21:33:22,514 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/013e1fd35575494c871f7c17a354944c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/013e1fd35575494c871f7c17a354944c 2024-12-13T21:33:22,514 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/06abf4fe9ea54265a57f577da0973929 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/A/06abf4fe9ea54265a57f577da0973929 2024-12-13T21:33:22,519 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/218294e98cc244379bd2da159091f347 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/218294e98cc244379bd2da159091f347 2024-12-13T21:33:22,519 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/4a84599e89334eb5afbc323e5ceb22e5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/4a84599e89334eb5afbc323e5ceb22e5 
2024-12-13T21:33:22,519 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/830efa469c754d6a973dc800b1515f39 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/B/830efa469c754d6a973dc800b1515f39 2024-12-13T21:33:22,524 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/ab4c06d41ddc46fa85f86792c5b7fc19 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/ab4c06d41ddc46fa85f86792c5b7fc19 2024-12-13T21:33:22,524 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/96480ab08d584547aa07af92eb4b45bf to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/96480ab08d584547aa07af92eb4b45bf 2024-12-13T21:33:22,524 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/6f957646b78249cfa61278a3d96669ae to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/C/6f957646b78249cfa61278a3d96669ae 2024-12-13T21:33:22,526 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/recovered.edits/457.seqid to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc/recovered.edits/457.seqid 2024-12-13T21:33:22,527 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/836c7ebcedb8aba974e5bf30d5802cfc 2024-12-13T21:33:22,527 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-13T21:33:22,528 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=158, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:22,530 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-13T21:33:22,531 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
2024-12-13T21:33:22,532 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=158, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:22,532 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-13T21:33:22,532 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734125602532"}]},"ts":"9223372036854775807"} 2024-12-13T21:33:22,534 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-13T21:33:22,534 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 836c7ebcedb8aba974e5bf30d5802cfc, NAME => 'TestAcidGuarantees,,1734125547360.836c7ebcedb8aba974e5bf30d5802cfc.', STARTKEY => '', ENDKEY => ''}] 2024-12-13T21:33:22,534 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-13T21:33:22,534 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734125602534"}]},"ts":"9223372036854775807"} 2024-12-13T21:33:22,536 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-13T21:33:22,576 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=158, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:22,577 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 74 msec 2024-12-13T21:33:22,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-13T21:33:22,606 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 158 completed 2024-12-13T21:33:22,619 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=243 (was 244), OpenFileDescriptor=447 (was 450), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=471 (was 439) - SystemLoadAverage LEAK? -, ProcessCount=9 (was 11), AvailableMemoryMB=4111 (was 1285) - AvailableMemoryMB LEAK? - 2024-12-13T21:33:22,630 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=243, OpenFileDescriptor=447, MaxFileDescriptor=1048576, SystemLoadAverage=471, ProcessCount=9, AvailableMemoryMB=4111 2024-12-13T21:33:22,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
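The DISABLE (procId 154) and DELETE (procId 158) operations the master just completed are what a client triggers with two Admin calls against the standard HBase 2.x API. The sketch below mirrors the table name from the log; the connection setup and the existence/enabled checks are illustrative assumptions rather than the test's own code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      // A table must be disabled before it can be deleted; each call blocks
      // until the corresponding master procedure (DisableTableProcedure /
      // DeleteTableProcedure in the log) completes.
      if (admin.tableExists(tn) && admin.isTableEnabled(tn)) {
        admin.disableTable(tn);
      }
      admin.deleteTable(tn);
    }
  }
}
```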
2024-12-13T21:33:22,632 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T21:33:22,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:22,633 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T21:33:22,634 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:22,634 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 159 2024-12-13T21:33:22,634 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T21:33:22,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-13T21:33:22,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742387_1563 (size=963) 2024-12-13T21:33:22,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-13T21:33:22,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-13T21:33:23,045 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05 2024-12-13T21:33:23,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742388_1564 (size=53) 2024-12-13T21:33:23,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-13T21:33:23,454 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:33:23,454 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing c4e9541490a67306648a9c57a40aab49, disabling compactions & flushes 2024-12-13T21:33:23,454 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:23,455 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:23,455 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. after waiting 0 ms 2024-12-13T21:33:23,455 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:23,455 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
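The CREATE request logged at 21:33:22,632 carries a descriptor with the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three column families A, B and C, each with VERSIONS => '1'. Roughly the same descriptor can be assembled with the HBase 2.x client API as sketched below. This is an approximation built from the logged attributes, not the test's actual setup code, and the 131072-byte flush size is included only to echo the TableDescriptorChecker warning above; whether that value comes from the descriptor or from hbase-site.xml cannot be told from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder tdb = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Table-level attribute seen in the logged descriptor.
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
          // 128 KB flush size, matching the value the TableDescriptorChecker
          // warns about (deliberately tiny, so flushes happen very often).
          .setMemStoreFlushSize(131072L);
      for (String family : new String[] {"A", "B", "C"}) {
        tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)   // VERSIONS => '1' in the logged families
            .build());
      }
      admin.createTable(tdb.build());
    }
  }
}
```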
2024-12-13T21:33:23,455 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:23,456 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T21:33:23,456 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734125603456"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734125603456"}]},"ts":"1734125603456"} 2024-12-13T21:33:23,457 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-13T21:33:23,458 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T21:33:23,458 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125603458"}]},"ts":"1734125603458"} 2024-12-13T21:33:23,459 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-13T21:33:23,484 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c4e9541490a67306648a9c57a40aab49, ASSIGN}] 2024-12-13T21:33:23,485 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c4e9541490a67306648a9c57a40aab49, ASSIGN 2024-12-13T21:33:23,486 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c4e9541490a67306648a9c57a40aab49, ASSIGN; state=OFFLINE, location=fd052dae32be,38989,1734125418878; forceNewPlan=false, retain=false 2024-12-13T21:33:23,636 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=c4e9541490a67306648a9c57a40aab49, regionState=OPENING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:33:23,638 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; OpenRegionProcedure c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:33:23,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-13T21:33:23,791 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:23,796 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:23,797 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7285): Opening region: {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} 2024-12-13T21:33:23,797 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:23,798 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:33:23,798 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7327): checking encryption for c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:23,798 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7330): checking classloading for c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:23,799 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:23,800 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:33:23,800 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c4e9541490a67306648a9c57a40aab49 columnFamilyName A 2024-12-13T21:33:23,801 DEBUG [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:23,801 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.HStore(327): Store=c4e9541490a67306648a9c57a40aab49/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:33:23,801 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:23,802 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:33:23,803 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c4e9541490a67306648a9c57a40aab49 columnFamilyName B 2024-12-13T21:33:23,803 DEBUG [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:23,803 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.HStore(327): Store=c4e9541490a67306648a9c57a40aab49/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:33:23,804 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:23,805 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:33:23,805 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c4e9541490a67306648a9c57a40aab49 columnFamilyName C 2024-12-13T21:33:23,805 DEBUG [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:23,806 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.HStore(327): Store=c4e9541490a67306648a9c57a40aab49/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:33:23,806 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:23,807 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:23,807 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:23,809 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-13T21:33:23,811 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1085): writing seq id for c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:23,813 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T21:33:23,814 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1102): Opened c4e9541490a67306648a9c57a40aab49; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70195525, jitterRate=0.045994833111763}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T21:33:23,815 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1001): Region open journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:23,815 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., pid=161, masterSystemTime=1734125603791 2024-12-13T21:33:23,817 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:23,817 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
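Each store above is opened with an ADAPTIVE CompactingMemStore ("compactor=ADAPTIVE", in-memory flush threshold 2.00 MB). A short sketch of how a table descriptor can request this, under the assumption that the test relies on the 'hbase.hregion.compacting.memstore.type' table attribute printed in the modify request further down; the per-family builder call shown alongside is an equivalent alternative, not something taken from the test itself:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveMemstoreDescriptorSketch {
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // Table-level attribute, as printed in the table metadata below:
        // 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
        .setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                // Per-family equivalent: request adaptive in-memory compaction.
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build())
        .build();
  }
}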
2024-12-13T21:33:23,817 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=c4e9541490a67306648a9c57a40aab49, regionState=OPEN, openSeqNum=2, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:33:23,819 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-12-13T21:33:23,819 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; OpenRegionProcedure c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 in 180 msec 2024-12-13T21:33:23,821 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-13T21:33:23,821 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c4e9541490a67306648a9c57a40aab49, ASSIGN in 335 msec 2024-12-13T21:33:23,821 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T21:33:23,821 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125603821"}]},"ts":"1734125603821"} 2024-12-13T21:33:23,822 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-13T21:33:23,834 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T21:33:23,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2020 sec 2024-12-13T21:33:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-13T21:33:24,743 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-12-13T21:33:24,744 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79a7bd2b to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40e55f2a 2024-12-13T21:33:24,812 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b739a35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:33:24,816 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:33:24,819 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48898, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:33:24,821 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-13T21:33:24,823 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57066, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-13T21:33:24,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-13T21:33:24,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.3 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T21:33:24,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:24,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742389_1565 (size=999) 2024-12-13T21:33:25,240 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-13T21:33:25,240 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-13T21:33:25,245 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-13T21:33:25,248 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c4e9541490a67306648a9c57a40aab49, REOPEN/MOVE}] 2024-12-13T21:33:25,249 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c4e9541490a67306648a9c57a40aab49, REOPEN/MOVE 2024-12-13T21:33:25,250 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=c4e9541490a67306648a9c57a40aab49, regionState=CLOSING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:33:25,251 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T21:33:25,251 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=164, state=RUNNABLE; CloseRegionProcedure c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:33:25,403 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:25,404 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] handler.UnassignRegionHandler(124): Close c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:25,405 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T21:33:25,405 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1681): Closing c4e9541490a67306648a9c57a40aab49, disabling compactions & flushes 2024-12-13T21:33:25,405 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:25,405 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:25,405 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. after waiting 0 ms 2024-12-13T21:33:25,405 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
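The modify request above turns family A into a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), and the master applies the new descriptor by closing and reopening the region (pids 162-166). A hedged sketch of the client call behind it; connection handling is elided and the class and method names here are hypothetical, but the builder calls match the attributes printed in the modify request:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyASketch {
  static void enableMob(Admin admin) throws IOException {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(name);
    TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(
            ColumnFamilyDescriptorBuilder
                .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                .setMobEnabled(true)   // IS_MOB => 'true'
                .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
                .build())
        .build();
    // modifyTable drives the ModifyTableProcedure / ReopenTableRegionsProcedure
    // chain recorded in the surrounding log entries.
    admin.modifyTable(modified);
  }
}

Reading the descriptor back afterwards, e.g. admin.getDescriptor(name).getColumnFamily(Bytes.toBytes("A")).isMobEnabled(), should report true once the ModifyTableProcedure finishes.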
2024-12-13T21:33:25,409 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-13T21:33:25,410 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:25,410 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1635): Region close journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:25,410 WARN [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegionServer(3786): Not adding moved region record: c4e9541490a67306648a9c57a40aab49 to self. 2024-12-13T21:33:25,411 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] handler.UnassignRegionHandler(170): Closed c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:25,411 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=c4e9541490a67306648a9c57a40aab49, regionState=CLOSED 2024-12-13T21:33:25,413 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=164 2024-12-13T21:33:25,413 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=164, state=SUCCESS; CloseRegionProcedure c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 in 161 msec 2024-12-13T21:33:25,413 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c4e9541490a67306648a9c57a40aab49, REOPEN/MOVE; state=CLOSED, location=fd052dae32be,38989,1734125418878; forceNewPlan=false, retain=true 2024-12-13T21:33:25,564 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=c4e9541490a67306648a9c57a40aab49, regionState=OPENING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:33:25,565 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE; OpenRegionProcedure c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:33:25,716 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:25,721 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:25,721 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7285): Opening region: {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} 2024-12-13T21:33:25,722 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:25,722 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T21:33:25,722 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7327): checking encryption for c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:25,722 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7330): checking classloading for c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:25,725 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:25,726 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:33:25,726 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c4e9541490a67306648a9c57a40aab49 columnFamilyName A 2024-12-13T21:33:25,728 DEBUG [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:25,728 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.HStore(327): Store=c4e9541490a67306648a9c57a40aab49/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:33:25,729 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:25,729 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:33:25,730 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c4e9541490a67306648a9c57a40aab49 columnFamilyName B 2024-12-13T21:33:25,730 DEBUG [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:25,730 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.HStore(327): Store=c4e9541490a67306648a9c57a40aab49/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:33:25,730 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:25,731 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-13T21:33:25,731 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c4e9541490a67306648a9c57a40aab49 columnFamilyName C 2024-12-13T21:33:25,731 DEBUG [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:25,732 INFO [StoreOpener-c4e9541490a67306648a9c57a40aab49-1 {}] regionserver.HStore(327): Store=c4e9541490a67306648a9c57a40aab49/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T21:33:25,732 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:25,733 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:25,734 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:25,737 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-13T21:33:25,739 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1085): writing seq id for c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:25,740 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1102): Opened c4e9541490a67306648a9c57a40aab49; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61517108, jitterRate=-0.0833236575126648}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T21:33:25,741 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1001): Region open journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:25,741 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., pid=166, masterSystemTime=1734125605716 2024-12-13T21:33:25,743 DEBUG [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:25,743 INFO [RS_OPEN_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
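The entries that follow show the test opening additional client connections, requesting a table flush (FlushTableProcedure pid=167), and the region server rejecting some writes with RegionTooBusyException ("Over memstore limit=512.0 K") until the flush drains the memstore. A minimal sketch of that client-side pattern; whether the busy exception surfaces directly to the caller depends on client retry settings (the connections in this log use maxRetries=0), so the explicit retry loop is an assumption rather than the test's actual logic:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(name)) {
      // Matches the "flush TestAcidGuarantees" request handled by
      // FlushTableProcedure (pid=167) in the log below.
      admin.flush(name);

      // Row/family/qualifier taken from the flushed cell key in the log
      // ("test_row_0/A:col10"); the value is illustrative.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          // Back off briefly and retry once the memstore has room again.
          Thread.sleep(100L * (attempt + 1));
        }
      }
    }
  }
}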
2024-12-13T21:33:25,743 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=c4e9541490a67306648a9c57a40aab49, regionState=OPEN, openSeqNum=5, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:33:25,745 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=164 2024-12-13T21:33:25,745 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=164, state=SUCCESS; OpenRegionProcedure c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 in 179 msec 2024-12-13T21:33:25,746 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-13T21:33:25,746 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c4e9541490a67306648a9c57a40aab49, REOPEN/MOVE in 497 msec 2024-12-13T21:33:25,748 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=163, resume processing ppid=162 2024-12-13T21:33:25,748 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 502 msec 2024-12-13T21:33:25,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 923 msec 2024-12-13T21:33:25,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-13T21:33:25,751 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d688bcb to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@271e8143 2024-12-13T21:33:25,794 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20bb05a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:33:25,796 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31f7e171 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62b06a95 2024-12-13T21:33:25,809 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a5ecd59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:33:25,810 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6c078737 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d02ace0 2024-12-13T21:33:25,818 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61da8c1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:33:25,818 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7bf8843a 
to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63054209 2024-12-13T21:33:25,826 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@560a8819, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:33:25,826 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76670256 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fbb1399 2024-12-13T21:33:25,834 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3df30e37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:33:25,835 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51fccca6 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@745bf218 2024-12-13T21:33:25,843 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@336d4b92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:33:25,843 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x539997ae to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78f964f7 2024-12-13T21:33:25,851 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@219191a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:33:25,851 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65b56307 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@56586d9a 2024-12-13T21:33:25,859 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b69e269, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:33:25,860 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a733412 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fd2ba7b 2024-12-13T21:33:25,868 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b9c7f42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:33:25,868 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4a684cd4 to 127.0.0.1:57927 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@70532680 2024-12-13T21:33:25,877 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@84d21a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T21:33:25,882 DEBUG [hconnection-0x5b6b1cc8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:33:25,882 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:25,883 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48900, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:33:25,883 DEBUG [hconnection-0x3feabb43-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:33:25,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-12-13T21:33:25,884 DEBUG [hconnection-0x63dd5ff7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:33:25,884 DEBUG [hconnection-0x7ffd98bf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:33:25,884 DEBUG [hconnection-0x4db6ccfc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:33:25,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-13T21:33:25,884 DEBUG [hconnection-0x4c900550-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:33:25,885 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:25,885 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48922, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:33:25,885 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48910, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:33:25,885 DEBUG [hconnection-0x1c91ff9f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:33:25,885 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48942, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:33:25,885 INFO [RS-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48934, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:33:25,885 DEBUG [hconnection-0x50f513ad-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:33:25,886 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:25,886 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48950, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:33:25,886 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:25,886 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48954, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:33:25,887 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48952, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:33:25,888 DEBUG [hconnection-0x35d859b5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:33:25,888 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:33:25,890 DEBUG [hconnection-0x68e2b825-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T21:33:25,891 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48962, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T21:33:25,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:25,892 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-13T21:33:25,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:25,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:25,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:25,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:25,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:25,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:25,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:25,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125665905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:25,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:25,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125665906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:25,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:25,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125665908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:25,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:25,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125665908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:25,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:25,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125665908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:25,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213f6cd70b65e714e1e8aaa46f20ed6a7c7_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_0/A:col10/1734125605890/Put/seqid=0 2024-12-13T21:33:25,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742390_1566 (size=9714) 2024-12-13T21:33:25,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-13T21:33:26,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125666008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125666010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125666010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125666010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125666010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,039 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-13T21:33:26,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:26,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:26,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:26,039 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:26,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:26,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:26,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-13T21:33:26,191 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-13T21:33:26,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:26,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:26,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:26,191 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:26,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:26,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:26,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125666211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125666213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125666213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125666213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125666213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,322 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:26,324 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213f6cd70b65e714e1e8aaa46f20ed6a7c7_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213f6cd70b65e714e1e8aaa46f20ed6a7c7_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:26,325 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/c0ed2f7914c5476b856ffc1e693a74f1, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:26,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/c0ed2f7914c5476b856ffc1e693a74f1 is 175, key is test_row_0/A:col10/1734125605890/Put/seqid=0 2024-12-13T21:33:26,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742391_1567 (size=22361) 2024-12-13T21:33:26,343 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-13T21:33:26,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:26,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:26,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:26,344 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:26,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
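[Note on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects each Mutate once the region's memstore is over its blocking limit (512.0 K here), and the writer keeps seeing the exception until the flush drains the memstore. The following is a minimal, purely illustrative client-side sketch of backing off on that exception; the table, family and row names are taken from the log, while MAX_ATTEMPTS and BACKOFF_MS are made-up values, and in practice the standard HBase client already retries these failures internally and may surface them wrapped in a retries-exhausted exception.]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
      // Illustrative constants, not taken from the test above.
      private static final int MAX_ATTEMPTS = 5;
      private static final long BACKOFF_MS = 200;

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              // Rejected with "Over memstore limit=..." while the region is blocked.
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              // Depending on client retry settings this may instead arrive wrapped
              // in a broader IOException after the client's own internal retries.
              if (attempt >= MAX_ATTEMPTS) {
                throw e;                            // give up after a few attempts
              }
              Thread.sleep(BACKOFF_MS * attempt);   // simple linear backoff while the flush runs
            }
          }
        }
      }
    }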
2024-12-13T21:33:26,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:26,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-13T21:33:26,495 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-13T21:33:26,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:26,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:26,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:26,496 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:26,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
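[Note on the 512.0 K figure in the rejections above: the blocking threshold checked by HRegion.checkResources is the per-region flush size multiplied by the memstore block multiplier. The excerpt does not show what this test actually configures, so the values below are an assumption chosen only because they would yield a 512 K blocking limit; the two property keys themselves are the standard HBase settings.]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values for illustration only; not taken from the test above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // 128 K flush trigger per region
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x the flush size

        // The blocking limit that trips RegionTooBusyException is flush size * block multiplier.
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        System.out.println("Blocking memstore limit: " + blockingLimit + " bytes"); // 524288 = 512.0 K
      }
    }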
2024-12-13T21:33:26,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:26,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125666513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125666515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125666515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125666516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:26,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125666516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,648 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-13T21:33:26,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:26,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:26,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:26,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:26,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:26,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:26,728 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/c0ed2f7914c5476b856ffc1e693a74f1 2024-12-13T21:33:26,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/2683bf9dfb4c4294b6dca3416a2fede9 is 50, key is test_row_0/B:col10/1734125605890/Put/seqid=0 2024-12-13T21:33:26,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742392_1568 (size=9657) 2024-12-13T21:33:26,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/2683bf9dfb4c4294b6dca3416a2fede9 2024-12-13T21:33:26,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/3743238a46bd42dab23b43d25eeb8e5f is 50, key is test_row_0/C:col10/1734125605890/Put/seqid=0 2024-12-13T21:33:26,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742393_1569 (size=9657) 2024-12-13T21:33:26,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/3743238a46bd42dab23b43d25eeb8e5f 2024-12-13T21:33:26,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/c0ed2f7914c5476b856ffc1e693a74f1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c0ed2f7914c5476b856ffc1e693a74f1 2024-12-13T21:33:26,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c0ed2f7914c5476b856ffc1e693a74f1, entries=100, sequenceid=16, filesize=21.8 K 2024-12-13T21:33:26,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/2683bf9dfb4c4294b6dca3416a2fede9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/2683bf9dfb4c4294b6dca3416a2fede9 2024-12-13T21:33:26,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/2683bf9dfb4c4294b6dca3416a2fede9, entries=100, sequenceid=16, filesize=9.4 K 2024-12-13T21:33:26,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/3743238a46bd42dab23b43d25eeb8e5f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3743238a46bd42dab23b43d25eeb8e5f 2024-12-13T21:33:26,778 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3743238a46bd42dab23b43d25eeb8e5f, entries=100, sequenceid=16, filesize=9.4 K 2024-12-13T21:33:26,779 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for c4e9541490a67306648a9c57a40aab49 in 887ms, sequenceid=16, compaction requested=false 2024-12-13T21:33:26,779 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-13T21:33:26,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:26,800 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:26,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 
2024-12-13T21:33:26,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:26,800 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-13T21:33:26,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:26,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:26,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:26,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:26,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:26,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:26,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213424df36543814ec7a58e128ef9a729d2_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_0/A:col10/1734125605906/Put/seqid=0 2024-12-13T21:33:26,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742394_1570 (size=12154) 2024-12-13T21:33:26,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-13T21:33:27,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:27,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:27,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125667022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125667022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125667022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125667023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125667023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125667124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125667124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125667125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125667125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125667125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:27,211 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213424df36543814ec7a58e128ef9a729d2_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213424df36543814ec7a58e128ef9a729d2_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/602651688b58466bb7a50b8a31164082, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:27,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/602651688b58466bb7a50b8a31164082 is 175, key is test_row_0/A:col10/1734125605906/Put/seqid=0 2024-12-13T21:33:27,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742395_1571 (size=30955) 2024-12-13T21:33:27,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125667326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125667326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125667327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125667327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125667328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,400 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-13T21:33:27,615 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/602651688b58466bb7a50b8a31164082 2024-12-13T21:33:27,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/539006ec76b045ae95da303de031a45a is 50, key is test_row_0/B:col10/1734125605906/Put/seqid=0 2024-12-13T21:33:27,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742396_1572 (size=12001) 2024-12-13T21:33:27,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125667628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125667629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125667629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125667630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:27,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125667630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:27,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-13T21:33:28,023 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/539006ec76b045ae95da303de031a45a 2024-12-13T21:33:28,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/88e2c93c57a24bd89baa1d71905886f7 is 50, key is test_row_0/C:col10/1734125605906/Put/seqid=0 2024-12-13T21:33:28,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742397_1573 (size=12001) 2024-12-13T21:33:28,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:28,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125668130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:28,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:28,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125668132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:28,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:28,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125668133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:28,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:28,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125668133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:28,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:28,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125668135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:28,437 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/88e2c93c57a24bd89baa1d71905886f7 2024-12-13T21:33:28,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/602651688b58466bb7a50b8a31164082 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/602651688b58466bb7a50b8a31164082 2024-12-13T21:33:28,442 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/602651688b58466bb7a50b8a31164082, entries=150, sequenceid=41, filesize=30.2 K 2024-12-13T21:33:28,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/539006ec76b045ae95da303de031a45a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/539006ec76b045ae95da303de031a45a 2024-12-13T21:33:28,445 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/539006ec76b045ae95da303de031a45a, entries=150, sequenceid=41, filesize=11.7 K 2024-12-13T21:33:28,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/88e2c93c57a24bd89baa1d71905886f7 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/88e2c93c57a24bd89baa1d71905886f7 2024-12-13T21:33:28,448 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/88e2c93c57a24bd89baa1d71905886f7, entries=150, sequenceid=41, filesize=11.7 K 2024-12-13T21:33:28,449 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c4e9541490a67306648a9c57a40aab49 in 1648ms, sequenceid=41, compaction requested=false 2024-12-13T21:33:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-13T21:33:28,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-13T21:33:28,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-13T21:33:28,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5630 sec 2024-12-13T21:33:28,452 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 2.5690 sec 2024-12-13T21:33:29,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:29,136 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-13T21:33:29,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:29,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:29,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:29,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:29,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:29,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:29,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213c82f94c494904dbc92a530e8063127e6_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_0/A:col10/1734125609135/Put/seqid=0 2024-12-13T21:33:29,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742398_1574 (size=12154) 2024-12-13T21:33:29,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125669150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125669151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125669152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125669152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125669152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125669253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125669253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125669254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125669255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125669255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125669455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125669456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125669456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125669457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125669458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,546 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:29,548 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213c82f94c494904dbc92a530e8063127e6_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213c82f94c494904dbc92a530e8063127e6_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:29,549 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/29fed128636747c9ae64310bbf453c79, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:29,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/29fed128636747c9ae64310bbf453c79 is 175, key is test_row_0/A:col10/1734125609135/Put/seqid=0 2024-12-13T21:33:29,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742399_1575 (size=30955) 2024-12-13T21:33:29,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125669758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125669759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125669759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125669759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:29,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125669760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:29,954 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/29fed128636747c9ae64310bbf453c79 2024-12-13T21:33:29,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/66cfae4cb89148ad858d69eaa72ed6f1 is 50, key is test_row_0/B:col10/1734125609135/Put/seqid=0 2024-12-13T21:33:29,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742400_1576 (size=12001) 2024-12-13T21:33:29,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-13T21:33:29,988 INFO [Thread-2513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-12-13T21:33:29,989 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:29,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-12-13T21:33:29,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 
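The HBaseAdmin$TableFuture entry above ("Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed") and the flush request stored as pid=169 are client-driven table flushes. A minimal sketch of issuing such a flush through the HBase Admin API follows; the class name and connection setup are illustrative assumptions, not code taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Connect using whatever hbase-site.xml is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush every region of the table; in the log above this
      // shows up as a FlushTableProcedure (pid=167, pid=169) fanning out
      // FlushRegionProcedure subprocedures (pid=168, pid=170) to the region server.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The "Checking to see if procedure is done pid=..." entries are the client polling the master for completion of that stored procedure before the flush call returns.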
2024-12-13T21:33:29,990 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:29,991 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:29,991 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:30,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-13T21:33:30,142 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:30,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-13T21:33:30,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:30,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:30,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:30,142 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
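The repeated RegionTooBusyException ("Over memstore limit=512.0 K") entries come from HRegion.checkResources() rejecting writers while the region's memstore is above its blocking threshold, and the "NOT flushing ... as already flushing" / "Unable to complete flush" errors show a second flush request being turned away while the first is still running. The sketch below is one illustration of a writer tolerating that back-pressure; the configuration values, retry count, and backoff are assumptions chosen only to match the 512 K figure in the log, not settings read from TestAcidGuarantees.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackpressureAwareWriter {
  public static void main(String[] args) throws Exception {
    // Region-server-side back-pressure is governed by these settings (assumed values,
    // shown only to explain the 512 K limit: 128 KB * 4 = 512 KB):
    //   hbase.hregion.memstore.flush.size       e.g. 131072 (128 KB)
    //   hbase.hregion.memstore.block.multiplier e.g. 4
    // Once a region's memstore exceeds flush.size * multiplier, checkResources()
    // rejects further mutations with RegionTooBusyException until a flush frees space.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family and qualifier mirror the keys seen in the log (test_row_0, A:col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);   // the client normally also retries RegionTooBusyException internally
          break;
        } catch (IOException e) {
          // RegionTooBusyException (possibly wrapped once client retries are exhausted)
          // signals back-pressure; wait for the server-side flush to catch up, then retry.
          Thread.sleep(200L * attempt);
        }
      }
    }
  }
}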
2024-12-13T21:33:30,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:30,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:30,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:30,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125670263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:30,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:30,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125670263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:30,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:30,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125670263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:30,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:30,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125670263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:30,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:30,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125670265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:30,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-13T21:33:30,293 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:30,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-13T21:33:30,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:30,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:30,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:30,293 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:30,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:30,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:30,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/66cfae4cb89148ad858d69eaa72ed6f1 2024-12-13T21:33:30,383 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/4f40d1c8b7ff4677bd704bf8cef90a2c is 50, key is test_row_0/C:col10/1734125609135/Put/seqid=0 2024-12-13T21:33:30,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742401_1577 (size=12001) 2024-12-13T21:33:30,445 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:30,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-13T21:33:30,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:30,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:30,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:30,445 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:30,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:30,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-13T21:33:30,596 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:30,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-13T21:33:30,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:30,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:30,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:30,596 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:30,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:30,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:30,747 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:30,747 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-13T21:33:30,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:30,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:30,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:30,748 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:30,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:30,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:30,786 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/4f40d1c8b7ff4677bd704bf8cef90a2c 2024-12-13T21:33:30,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/29fed128636747c9ae64310bbf453c79 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/29fed128636747c9ae64310bbf453c79 2024-12-13T21:33:30,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/29fed128636747c9ae64310bbf453c79, entries=150, sequenceid=53, filesize=30.2 K 2024-12-13T21:33:30,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/66cfae4cb89148ad858d69eaa72ed6f1 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/66cfae4cb89148ad858d69eaa72ed6f1 2024-12-13T21:33:30,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/66cfae4cb89148ad858d69eaa72ed6f1, entries=150, sequenceid=53, filesize=11.7 K 2024-12-13T21:33:30,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/4f40d1c8b7ff4677bd704bf8cef90a2c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/4f40d1c8b7ff4677bd704bf8cef90a2c 2024-12-13T21:33:30,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/4f40d1c8b7ff4677bd704bf8cef90a2c, entries=150, sequenceid=53, filesize=11.7 K 2024-12-13T21:33:30,798 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for c4e9541490a67306648a9c57a40aab49 in 1662ms, sequenceid=53, compaction requested=true 2024-12-13T21:33:30,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:30,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:30,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:30,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:30,798 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:30,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:30,798 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:30,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:30,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:30,799 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84271 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:30,799 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:30,799 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/B is initiating minor compaction (all files) 2024-12-13T21:33:30,799 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/A is initiating minor compaction (all files) 2024-12-13T21:33:30,799 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/A in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:30,799 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/B in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:30,799 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c0ed2f7914c5476b856ffc1e693a74f1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/602651688b58466bb7a50b8a31164082, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/29fed128636747c9ae64310bbf453c79] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=82.3 K 2024-12-13T21:33:30,799 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/2683bf9dfb4c4294b6dca3416a2fede9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/539006ec76b045ae95da303de031a45a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/66cfae4cb89148ad858d69eaa72ed6f1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=32.9 K 2024-12-13T21:33:30,799 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:30,799 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c0ed2f7914c5476b856ffc1e693a74f1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/602651688b58466bb7a50b8a31164082, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/29fed128636747c9ae64310bbf453c79] 2024-12-13T21:33:30,799 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 2683bf9dfb4c4294b6dca3416a2fede9, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734125605890 2024-12-13T21:33:30,799 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0ed2f7914c5476b856ffc1e693a74f1, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734125605890 2024-12-13T21:33:30,800 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 539006ec76b045ae95da303de031a45a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734125605902 2024-12-13T21:33:30,800 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 602651688b58466bb7a50b8a31164082, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734125605902 2024-12-13T21:33:30,800 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29fed128636747c9ae64310bbf453c79, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734125607021 2024-12-13T21:33:30,800 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 66cfae4cb89148ad858d69eaa72ed6f1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734125607021 2024-12-13T21:33:30,805 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:30,805 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#B#compaction#488 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:30,806 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/9a5dad32c0d3441e8774004f1b7c88c3 is 50, key is test_row_0/B:col10/1734125609135/Put/seqid=0 2024-12-13T21:33:30,807 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241213373c34f249d345e4b562064b969a8fa6_c4e9541490a67306648a9c57a40aab49 store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:30,808 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241213373c34f249d345e4b562064b969a8fa6_c4e9541490a67306648a9c57a40aab49, store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:30,808 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213373c34f249d345e4b562064b969a8fa6_c4e9541490a67306648a9c57a40aab49 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:30,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742403_1579 (size=4469) 2024-12-13T21:33:30,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742402_1578 (size=12104) 2024-12-13T21:33:30,899 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:30,899 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-13T21:33:30,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:30,900 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-13T21:33:30,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:30,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:30,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:30,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:30,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:30,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:30,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412138ea06c36e36e4fa595390e76a29c3707_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_0/A:col10/1734125609152/Put/seqid=0 2024-12-13T21:33:30,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742404_1580 (size=12154) 2024-12-13T21:33:31,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-13T21:33:31,224 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#A#compaction#489 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:31,225 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/e6f5cc8329014e0fa13a0f66a813087d is 175, key is test_row_0/A:col10/1734125609135/Put/seqid=0 2024-12-13T21:33:31,227 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/9a5dad32c0d3441e8774004f1b7c88c3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/9a5dad32c0d3441e8774004f1b7c88c3 2024-12-13T21:33:31,231 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/B of c4e9541490a67306648a9c57a40aab49 into 9a5dad32c0d3441e8774004f1b7c88c3(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:31,231 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:31,231 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/B, priority=13, startTime=1734125610798; duration=0sec 2024-12-13T21:33:31,231 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:31,231 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:B 2024-12-13T21:33:31,231 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:31,232 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:31,232 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/C is initiating minor compaction (all files) 2024-12-13T21:33:31,232 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/C in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:31,232 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3743238a46bd42dab23b43d25eeb8e5f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/88e2c93c57a24bd89baa1d71905886f7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/4f40d1c8b7ff4677bd704bf8cef90a2c] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=32.9 K 2024-12-13T21:33:31,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742405_1581 (size=31058) 2024-12-13T21:33:31,233 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3743238a46bd42dab23b43d25eeb8e5f, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734125605890 2024-12-13T21:33:31,233 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 88e2c93c57a24bd89baa1d71905886f7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734125605902 2024-12-13T21:33:31,233 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f40d1c8b7ff4677bd704bf8cef90a2c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734125607021 2024-12-13T21:33:31,235 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/e6f5cc8329014e0fa13a0f66a813087d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e6f5cc8329014e0fa13a0f66a813087d 2024-12-13T21:33:31,237 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#C#compaction#491 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:31,238 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/50d44a13718e47e8b35e4434dc558037 is 50, key is test_row_0/C:col10/1734125609135/Put/seqid=0 2024-12-13T21:33:31,238 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/A of c4e9541490a67306648a9c57a40aab49 into e6f5cc8329014e0fa13a0f66a813087d(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:31,238 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:31,238 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/A, priority=13, startTime=1734125610798; duration=0sec 2024-12-13T21:33:31,238 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:31,238 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:A 2024-12-13T21:33:31,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742406_1582 (size=12104) 2024-12-13T21:33:31,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:31,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:31,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125671272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125671272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125671273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125671274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125671275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:31,311 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412138ea06c36e36e4fa595390e76a29c3707_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412138ea06c36e36e4fa595390e76a29c3707_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:31,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/2267d3233d344fcd8f508c0ae23daf9a, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:31,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/2267d3233d344fcd8f508c0ae23daf9a is 175, key is test_row_0/A:col10/1734125609152/Put/seqid=0 2024-12-13T21:33:31,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742407_1583 (size=30955) 2024-12-13T21:33:31,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125671375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125671375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125671377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125671378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125671576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125671577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125671579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125671579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,644 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/50d44a13718e47e8b35e4434dc558037 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/50d44a13718e47e8b35e4434dc558037 2024-12-13T21:33:31,646 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/C of c4e9541490a67306648a9c57a40aab49 into 50d44a13718e47e8b35e4434dc558037(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:31,646 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:31,646 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/C, priority=13, startTime=1734125610798; duration=0sec 2024-12-13T21:33:31,646 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:31,646 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:C 2024-12-13T21:33:31,716 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/2267d3233d344fcd8f508c0ae23daf9a 2024-12-13T21:33:31,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/d11d7e9be7d544f0a4e47dfea22d4720 is 50, key is test_row_0/B:col10/1734125609152/Put/seqid=0 2024-12-13T21:33:31,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742408_1584 (size=12001) 2024-12-13T21:33:31,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125671878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125671880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125671882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:31,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:31,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125671883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:32,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-13T21:33:32,126 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/d11d7e9be7d544f0a4e47dfea22d4720 2024-12-13T21:33:32,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/39780e251a324aee8dd860ea9dc5a400 is 50, key is test_row_0/C:col10/1734125609152/Put/seqid=0 2024-12-13T21:33:32,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742409_1585 (size=12001) 2024-12-13T21:33:32,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:32,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125672384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:32,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:32,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125672385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:32,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:32,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125672386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:32,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:32,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125672386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:32,533 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/39780e251a324aee8dd860ea9dc5a400 2024-12-13T21:33:32,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/2267d3233d344fcd8f508c0ae23daf9a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/2267d3233d344fcd8f508c0ae23daf9a 2024-12-13T21:33:32,543 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/2267d3233d344fcd8f508c0ae23daf9a, entries=150, sequenceid=78, filesize=30.2 K 2024-12-13T21:33:32,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/d11d7e9be7d544f0a4e47dfea22d4720 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/d11d7e9be7d544f0a4e47dfea22d4720 2024-12-13T21:33:32,547 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/d11d7e9be7d544f0a4e47dfea22d4720, entries=150, sequenceid=78, filesize=11.7 K 2024-12-13T21:33:32,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/39780e251a324aee8dd860ea9dc5a400 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/39780e251a324aee8dd860ea9dc5a400 2024-12-13T21:33:32,550 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/39780e251a324aee8dd860ea9dc5a400, entries=150, sequenceid=78, filesize=11.7 K 2024-12-13T21:33:32,551 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for c4e9541490a67306648a9c57a40aab49 in 1651ms, sequenceid=78, compaction requested=false 2024-12-13T21:33:32,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:32,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:32,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-12-13T21:33:32,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-12-13T21:33:32,553 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-13T21:33:32,553 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5610 sec 2024-12-13T21:33:32,554 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 2.5640 sec 2024-12-13T21:33:33,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:33,294 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-13T21:33:33,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:33,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:33,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:33,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:33,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:33,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:33,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213db91fb8194a241f08acc0f229985d781_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_0/A:col10/1734125611272/Put/seqid=0 2024-12-13T21:33:33,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742410_1586 (size=12154) 2024-12-13T21:33:33,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:33,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125673330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:33,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:33,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125673388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:33,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:33,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125673390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:33,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:33,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125673394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:33,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:33,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125673395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:33,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:33,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125673432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:33,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:33,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125673635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:33,702 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:33,705 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213db91fb8194a241f08acc0f229985d781_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213db91fb8194a241f08acc0f229985d781_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:33,705 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/e2752f6dd3cc41109e6f8b28bd1ae58e, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:33,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/e2752f6dd3cc41109e6f8b28bd1ae58e is 175, key is test_row_0/A:col10/1734125611272/Put/seqid=0 2024-12-13T21:33:33,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742411_1587 (size=30955) 2024-12-13T21:33:33,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:33,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125673939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:34,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-13T21:33:34,094 INFO [Thread-2513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-12-13T21:33:34,094 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:34,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-12-13T21:33:34,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-13T21:33:34,096 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:34,096 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:34,096 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:34,109 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/e2752f6dd3cc41109e6f8b28bd1ae58e 2024-12-13T21:33:34,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/a8bca196c8b64e8082cd94c841475922 is 50, key is test_row_0/B:col10/1734125611272/Put/seqid=0 2024-12-13T21:33:34,116 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742412_1588 (size=12001) 2024-12-13T21:33:34,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/a8bca196c8b64e8082cd94c841475922 2024-12-13T21:33:34,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/3592de98662e4b5ba3c0de8652c34d92 is 50, key is test_row_0/C:col10/1734125611272/Put/seqid=0 2024-12-13T21:33:34,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742413_1589 (size=12001) 2024-12-13T21:33:34,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-13T21:33:34,247 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:34,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-13T21:33:34,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:34,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:34,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:34,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:34,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:34,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:34,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-13T21:33:34,399 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:34,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-13T21:33:34,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:34,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:34,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:34,400 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:34,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:34,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:34,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:34,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125674444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:34,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/3592de98662e4b5ba3c0de8652c34d92 2024-12-13T21:33:34,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/e2752f6dd3cc41109e6f8b28bd1ae58e as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e2752f6dd3cc41109e6f8b28bd1ae58e 2024-12-13T21:33:34,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e2752f6dd3cc41109e6f8b28bd1ae58e, entries=150, sequenceid=94, filesize=30.2 K 2024-12-13T21:33:34,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/a8bca196c8b64e8082cd94c841475922 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/a8bca196c8b64e8082cd94c841475922 2024-12-13T21:33:34,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/a8bca196c8b64e8082cd94c841475922, entries=150, sequenceid=94, filesize=11.7 K 2024-12-13T21:33:34,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/3592de98662e4b5ba3c0de8652c34d92 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3592de98662e4b5ba3c0de8652c34d92 2024-12-13T21:33:34,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3592de98662e4b5ba3c0de8652c34d92, entries=150, sequenceid=94, filesize=11.7 K 2024-12-13T21:33:34,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for c4e9541490a67306648a9c57a40aab49 in 1244ms, sequenceid=94, compaction requested=true 2024-12-13T21:33:34,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:34,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:34,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:34,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:34,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:34,537 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:34,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:34,537 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:34,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:34,538 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:34,538 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:34,538 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/A is initiating minor compaction (all files) 2024-12-13T21:33:34,538 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/B is initiating minor compaction (all files) 2024-12-13T21:33:34,538 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/A in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:34,538 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e6f5cc8329014e0fa13a0f66a813087d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/2267d3233d344fcd8f508c0ae23daf9a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e2752f6dd3cc41109e6f8b28bd1ae58e] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=90.8 K 2024-12-13T21:33:34,538 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:34,538 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e6f5cc8329014e0fa13a0f66a813087d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/2267d3233d344fcd8f508c0ae23daf9a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e2752f6dd3cc41109e6f8b28bd1ae58e] 2024-12-13T21:33:34,538 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/B in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:34,538 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/9a5dad32c0d3441e8774004f1b7c88c3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/d11d7e9be7d544f0a4e47dfea22d4720, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/a8bca196c8b64e8082cd94c841475922] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=35.3 K 2024-12-13T21:33:34,538 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6f5cc8329014e0fa13a0f66a813087d, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734125607021 2024-12-13T21:33:34,538 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a5dad32c0d3441e8774004f1b7c88c3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734125607021 2024-12-13T21:33:34,538 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2267d3233d344fcd8f508c0ae23daf9a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734125609148 2024-12-13T21:33:34,538 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting d11d7e9be7d544f0a4e47dfea22d4720, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734125609148 2024-12-13T21:33:34,539 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2752f6dd3cc41109e6f8b28bd1ae58e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734125611272 2024-12-13T21:33:34,539 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a8bca196c8b64e8082cd94c841475922, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734125611272 2024-12-13T21:33:34,543 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:34,544 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#B#compaction#497 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:34,544 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/71865b1ae511432d90db39863012cf7a is 50, key is test_row_0/B:col10/1734125611272/Put/seqid=0 2024-12-13T21:33:34,546 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412138029dcf801c946be86bca7f392fe9197_c4e9541490a67306648a9c57a40aab49 store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:34,548 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412138029dcf801c946be86bca7f392fe9197_c4e9541490a67306648a9c57a40aab49, store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:34,548 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412138029dcf801c946be86bca7f392fe9197_c4e9541490a67306648a9c57a40aab49 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:34,552 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:34,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-13T21:33:34,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:34,552 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-13T21:33:34,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:34,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:34,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:34,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:34,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:34,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:34,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742414_1590 (size=12207) 2024-12-13T21:33:34,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213c72cecd997b5494fb5183f7c55e53575_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_0/A:col10/1734125613330/Put/seqid=0 2024-12-13T21:33:34,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742415_1591 (size=4469) 2024-12-13T21:33:34,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742416_1592 (size=12154) 2024-12-13T21:33:34,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-13T21:33:34,967 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/71865b1ae511432d90db39863012cf7a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/71865b1ae511432d90db39863012cf7a 2024-12-13T21:33:34,970 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/B of c4e9541490a67306648a9c57a40aab49 into 71865b1ae511432d90db39863012cf7a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:34,971 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:34,971 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/B, priority=13, startTime=1734125614537; duration=0sec 2024-12-13T21:33:34,971 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:34,971 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:B 2024-12-13T21:33:34,971 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:34,971 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:34,972 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/C is initiating minor compaction (all files) 2024-12-13T21:33:34,972 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/C in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:34,972 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/50d44a13718e47e8b35e4434dc558037, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/39780e251a324aee8dd860ea9dc5a400, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3592de98662e4b5ba3c0de8652c34d92] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=35.3 K 2024-12-13T21:33:34,972 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 50d44a13718e47e8b35e4434dc558037, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734125607021 2024-12-13T21:33:34,972 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 39780e251a324aee8dd860ea9dc5a400, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734125609148 2024-12-13T21:33:34,972 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3592de98662e4b5ba3c0de8652c34d92, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734125611272 2024-12-13T21:33:34,977 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
c4e9541490a67306648a9c57a40aab49#C#compaction#500 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:34,978 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/fff0829e157b4b90bda86ee5317bae6d is 50, key is test_row_0/C:col10/1734125611272/Put/seqid=0 2024-12-13T21:33:34,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742417_1593 (size=12207) 2024-12-13T21:33:34,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:34,989 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#A#compaction#498 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:34,989 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/e326877d7a6e486fbc13bd1e1306f6ce is 175, key is test_row_0/A:col10/1734125611272/Put/seqid=0 2024-12-13T21:33:34,991 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213c72cecd997b5494fb5183f7c55e53575_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213c72cecd997b5494fb5183f7c55e53575_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:34,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/c29db90341064e0e89e2c0da326b3098, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:34,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/c29db90341064e0e89e2c0da326b3098 is 175, key is test_row_0/A:col10/1734125613330/Put/seqid=0 2024-12-13T21:33:34,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742418_1594 (size=31161) 2024-12-13T21:33:35,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46537 is added to blk_1073742419_1595 (size=30955) 2024-12-13T21:33:35,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-13T21:33:35,386 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/fff0829e157b4b90bda86ee5317bae6d as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/fff0829e157b4b90bda86ee5317bae6d 2024-12-13T21:33:35,389 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/C of c4e9541490a67306648a9c57a40aab49 into fff0829e157b4b90bda86ee5317bae6d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:35,389 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:35,389 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/C, priority=13, startTime=1734125614537; duration=0sec 2024-12-13T21:33:35,389 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:35,390 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:C 2024-12-13T21:33:35,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:35,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:35,396 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/e326877d7a6e486fbc13bd1e1306f6ce as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e326877d7a6e486fbc13bd1e1306f6ce 2024-12-13T21:33:35,399 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/A of c4e9541490a67306648a9c57a40aab49 into e326877d7a6e486fbc13bd1e1306f6ce(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:35,399 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:35,399 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/A, priority=13, startTime=1734125614537; duration=0sec 2024-12-13T21:33:35,399 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:35,399 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:A 2024-12-13T21:33:35,401 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/c29db90341064e0e89e2c0da326b3098 2024-12-13T21:33:35,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:35,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125675404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:35,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:35,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125675404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:35,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:35,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125675404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:35,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:35,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125675405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:35,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/20387cbbefd145cbaee36929cbf1eb51 is 50, key is test_row_0/B:col10/1734125613330/Put/seqid=0 2024-12-13T21:33:35,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742420_1596 (size=12001) 2024-12-13T21:33:35,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:35,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125675454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:35,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:35,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:35,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125675506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:35,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125675507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:35,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:35,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125675507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:35,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:35,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125675507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:35,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:35,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125675709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:35,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:35,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125675709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:35,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:35,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125675709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:35,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:35,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125675710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:35,834 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/20387cbbefd145cbaee36929cbf1eb51 2024-12-13T21:33:35,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/584f090e7854423eaff2b74fd9ea60b7 is 50, key is test_row_0/C:col10/1734125613330/Put/seqid=0 2024-12-13T21:33:35,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742421_1597 (size=12001) 2024-12-13T21:33:35,845 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/584f090e7854423eaff2b74fd9ea60b7 2024-12-13T21:33:35,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/c29db90341064e0e89e2c0da326b3098 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c29db90341064e0e89e2c0da326b3098 2024-12-13T21:33:35,850 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c29db90341064e0e89e2c0da326b3098, entries=150, sequenceid=117, filesize=30.2 K 2024-12-13T21:33:35,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/20387cbbefd145cbaee36929cbf1eb51 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/20387cbbefd145cbaee36929cbf1eb51 2024-12-13T21:33:35,853 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/20387cbbefd145cbaee36929cbf1eb51, entries=150, sequenceid=117, filesize=11.7 K 2024-12-13T21:33:35,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/584f090e7854423eaff2b74fd9ea60b7 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/584f090e7854423eaff2b74fd9ea60b7 2024-12-13T21:33:35,856 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/584f090e7854423eaff2b74fd9ea60b7, entries=150, sequenceid=117, filesize=11.7 K 2024-12-13T21:33:35,856 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for c4e9541490a67306648a9c57a40aab49 in 1304ms, sequenceid=117, compaction requested=false 2024-12-13T21:33:35,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:35,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
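[Editor's illustration, not part of the log] The flush that finishes above (pid=172, child of FlushTableProcedure 171 for TestAcidGuarantees) is the server side of a client-requested table flush; the log a few entries below records the request as "Operation: FLUSH, Table Name: default:TestAcidGuarantees". As a rough, hedged sketch only — the class name, connection setup, and error handling are assumptions, while the table name comes from the log — the client side of such a request could look like this:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {  // hypothetical class, for illustration only
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a flush of all column families of the table; on this build the
          // master runs it as a FlushTableProcedure with per-region
          // FlushRegionProcedure children, as seen in the pid=171/172 entries above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }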
2024-12-13T21:33:35,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-13T21:33:35,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-13T21:33:35,858 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-12-13T21:33:35,858 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7610 sec 2024-12-13T21:33:35,859 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 1.7640 sec 2024-12-13T21:33:36,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:36,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-13T21:33:36,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:36,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:36,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:36,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:36,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:36,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:36,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412135d6643a012e4423b926a2876bbf5ee92_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_0/A:col10/1734125615403/Put/seqid=0 2024-12-13T21:33:36,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742422_1598 (size=12304) 2024-12-13T21:33:36,021 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:36,024 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412135d6643a012e4423b926a2876bbf5ee92_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412135d6643a012e4423b926a2876bbf5ee92_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:36,024 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/4c8daadd30a9491fb291da77f1a269b5, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:36,025 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/4c8daadd30a9491fb291da77f1a269b5 is 175, key is test_row_0/A:col10/1734125615403/Put/seqid=0 2024-12-13T21:33:36,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742423_1599 (size=31105) 2024-12-13T21:33:36,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125676029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125676029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125676029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125676030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125676131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125676132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125676132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125676132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-13T21:33:36,198 INFO [Thread-2513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-12-13T21:33:36,199 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:36,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-12-13T21:33:36,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T21:33:36,200 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:36,201 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:36,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:36,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T21:33:36,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125676334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125676334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125676334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125676334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,352 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,352 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-13T21:33:36,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:36,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:36,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:36,352 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
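[Editor's illustration, not part of the log] The repeated RegionTooBusyException entries in this section are the region server rejecting writes while the memstore of c4e9541490a67306648a9c57a40aab49 is over its blocking limit (512.0 K here); the rising callId values on the same connections suggest the client keeps retrying until a flush frees space. A minimal, hedged sketch of such a writer follows — the retry/pause values and the cell value are illustrative assumptions; only the table, family, row, and qualifier names are taken from the log:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {  // hypothetical class, for illustration only
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative retry tuning: RegionTooBusyException is retried by the client
        // with backoff until retries or the operation timeout are exhausted.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100); // base backoff in ms
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          table.put(put); // blocks through the retries that appear as repeated Mutate calls in the log
        }
      }
    }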
2024-12-13T21:33:36,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:36,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:36,428 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/4c8daadd30a9491fb291da77f1a269b5 2024-12-13T21:33:36,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/a16022ed7e0e4107b6f1643681fc1235 is 50, key is test_row_0/B:col10/1734125615403/Put/seqid=0 2024-12-13T21:33:36,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742424_1600 (size=12151) 2024-12-13T21:33:36,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T21:33:36,504 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-13T21:33:36,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:36,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:36,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:36,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:36,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:36,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:36,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125676638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125676638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125676638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:36,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125676639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,656 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-13T21:33:36,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:36,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:36,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:36,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:36,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:36,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:36,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T21:33:36,808 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-13T21:33:36,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:36,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:36,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:36,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:36,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:36,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:36,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/a16022ed7e0e4107b6f1643681fc1235 2024-12-13T21:33:36,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/3788fc414b584d67a65959deb341fa32 is 50, key is test_row_0/C:col10/1734125615403/Put/seqid=0 2024-12-13T21:33:36,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742425_1601 (size=12151) 2024-12-13T21:33:36,960 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:36,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-13T21:33:36,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:36,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:36,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:36,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:36,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:36,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:37,112 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:37,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-13T21:33:37,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:37,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:37,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:37,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:37,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:37,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:37,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:37,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125677140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:37,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:37,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125677141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:37,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:37,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125677142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:37,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:37,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125677144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:37,248 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/3788fc414b584d67a65959deb341fa32 2024-12-13T21:33:37,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/4c8daadd30a9491fb291da77f1a269b5 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/4c8daadd30a9491fb291da77f1a269b5 2024-12-13T21:33:37,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/4c8daadd30a9491fb291da77f1a269b5, entries=150, sequenceid=135, filesize=30.4 K 2024-12-13T21:33:37,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/a16022ed7e0e4107b6f1643681fc1235 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/a16022ed7e0e4107b6f1643681fc1235 2024-12-13T21:33:37,256 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/a16022ed7e0e4107b6f1643681fc1235, entries=150, sequenceid=135, filesize=11.9 K 2024-12-13T21:33:37,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/3788fc414b584d67a65959deb341fa32 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3788fc414b584d67a65959deb341fa32 2024-12-13T21:33:37,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3788fc414b584d67a65959deb341fa32, entries=150, sequenceid=135, filesize=11.9 K 2024-12-13T21:33:37,259 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for c4e9541490a67306648a9c57a40aab49 in 1246ms, sequenceid=135, compaction requested=true 2024-12-13T21:33:37,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:37,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:37,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:37,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:37,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:37,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:37,260 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:37,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-13T21:33:37,260 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:37,260 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:37,260 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93221 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:37,261 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/A is initiating minor compaction (all files) 2024-12-13T21:33:37,261 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/B is initiating minor compaction (all files) 2024-12-13T21:33:37,261 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/A in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:37,261 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/B in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:37,261 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/71865b1ae511432d90db39863012cf7a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/20387cbbefd145cbaee36929cbf1eb51, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/a16022ed7e0e4107b6f1643681fc1235] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=35.5 K 2024-12-13T21:33:37,261 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e326877d7a6e486fbc13bd1e1306f6ce, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c29db90341064e0e89e2c0da326b3098, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/4c8daadd30a9491fb291da77f1a269b5] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=91.0 K 2024-12-13T21:33:37,261 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:37,261 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e326877d7a6e486fbc13bd1e1306f6ce, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c29db90341064e0e89e2c0da326b3098, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/4c8daadd30a9491fb291da77f1a269b5] 2024-12-13T21:33:37,261 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 71865b1ae511432d90db39863012cf7a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734125611272 2024-12-13T21:33:37,261 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting e326877d7a6e486fbc13bd1e1306f6ce, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734125611272 2024-12-13T21:33:37,261 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 20387cbbefd145cbaee36929cbf1eb51, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734125613322 2024-12-13T21:33:37,261 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting c29db90341064e0e89e2c0da326b3098, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734125613322 2024-12-13T21:33:37,261 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting a16022ed7e0e4107b6f1643681fc1235, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734125615403 2024-12-13T21:33:37,261 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c8daadd30a9491fb291da77f1a269b5, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734125615403 2024-12-13T21:33:37,264 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:37,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-13T21:33:37,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:37,265 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-13T21:33:37,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:37,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:37,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:37,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:37,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:37,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:37,266 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#B#compaction#506 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:37,266 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/142dcf1a3f884b30aeb58b783668e0f5 is 50, key is test_row_0/B:col10/1734125615403/Put/seqid=0 2024-12-13T21:33:37,267 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:37,279 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412132d8f0fdeb3224c29a014fe8df6ff74db_c4e9541490a67306648a9c57a40aab49 store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:37,280 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412132d8f0fdeb3224c29a014fe8df6ff74db_c4e9541490a67306648a9c57a40aab49, store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:37,280 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412132d8f0fdeb3224c29a014fe8df6ff74db_c4e9541490a67306648a9c57a40aab49 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:37,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213facf9ca2e1874da6bb84572b3d75cdd8_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_0/A:col10/1734125616028/Put/seqid=0 2024-12-13T21:33:37,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742426_1602 (size=12459) 2024-12-13T21:33:37,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T21:33:37,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742427_1603 (size=4469) 2024-12-13T21:33:37,317 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#A#compaction#507 average throughput is 0.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:37,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742428_1604 (size=12304) 2024-12-13T21:33:37,318 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/7b7c86d4aeed42168c631d4c443b982f is 175, key is test_row_0/A:col10/1734125615403/Put/seqid=0 2024-12-13T21:33:37,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742429_1605 (size=31413) 2024-12-13T21:33:37,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:37,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:37,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:37,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125677485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:37,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:37,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125677587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:37,704 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/142dcf1a3f884b30aeb58b783668e0f5 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/142dcf1a3f884b30aeb58b783668e0f5 2024-12-13T21:33:37,706 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/B of c4e9541490a67306648a9c57a40aab49 into 142dcf1a3f884b30aeb58b783668e0f5(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:37,706 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:37,706 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/B, priority=13, startTime=1734125617260; duration=0sec 2024-12-13T21:33:37,706 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:37,706 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:B 2024-12-13T21:33:37,706 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:37,707 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:37,707 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/C is initiating minor compaction (all files) 2024-12-13T21:33:37,707 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/C in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:37,707 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/fff0829e157b4b90bda86ee5317bae6d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/584f090e7854423eaff2b74fd9ea60b7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3788fc414b584d67a65959deb341fa32] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=35.5 K 2024-12-13T21:33:37,707 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting fff0829e157b4b90bda86ee5317bae6d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1734125611272 2024-12-13T21:33:37,707 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 584f090e7854423eaff2b74fd9ea60b7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734125613322 2024-12-13T21:33:37,708 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3788fc414b584d67a65959deb341fa32, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734125615403 2024-12-13T21:33:37,711 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
c4e9541490a67306648a9c57a40aab49#C#compaction#509 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:37,712 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/c3f2bfae53d74c20a92043c38876db27 is 50, key is test_row_0/C:col10/1734125615403/Put/seqid=0 2024-12-13T21:33:37,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742430_1606 (size=12459) 2024-12-13T21:33:37,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:37,720 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213facf9ca2e1874da6bb84572b3d75cdd8_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213facf9ca2e1874da6bb84572b3d75cdd8_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:37,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/b708f3ad96dd489784abd6ecf4ff2d42, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:37,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/b708f3ad96dd489784abd6ecf4ff2d42 is 175, key is test_row_0/A:col10/1734125616028/Put/seqid=0 2024-12-13T21:33:37,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742431_1607 (size=31105) 2024-12-13T21:33:37,730 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/7b7c86d4aeed42168c631d4c443b982f as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/7b7c86d4aeed42168c631d4c443b982f 2024-12-13T21:33:37,733 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/A of c4e9541490a67306648a9c57a40aab49 into 7b7c86d4aeed42168c631d4c443b982f(size=30.7 K), total size for store is 30.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:37,733 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:37,733 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/A, priority=13, startTime=1734125617260; duration=0sec 2024-12-13T21:33:37,733 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:37,733 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:A 2024-12-13T21:33:37,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125677789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:38,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:38,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125678091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:38,118 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/c3f2bfae53d74c20a92043c38876db27 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/c3f2bfae53d74c20a92043c38876db27 2024-12-13T21:33:38,121 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/C of c4e9541490a67306648a9c57a40aab49 into c3f2bfae53d74c20a92043c38876db27(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:38,121 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:38,121 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/C, priority=13, startTime=1734125617260; duration=0sec 2024-12-13T21:33:38,121 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:38,121 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:C 2024-12-13T21:33:38,125 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/b708f3ad96dd489784abd6ecf4ff2d42 2024-12-13T21:33:38,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/ec83d509f0a844a8a951e52e57a2a914 is 50, key is test_row_0/B:col10/1734125616028/Put/seqid=0 2024-12-13T21:33:38,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742432_1608 (size=12151) 2024-12-13T21:33:38,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125678145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:38,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125678147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:38,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125678148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:38,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125678148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:38,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T21:33:38,534 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/ec83d509f0a844a8a951e52e57a2a914 2024-12-13T21:33:38,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/621c49236cda464da71c08d0e35b459b is 50, key is test_row_0/C:col10/1734125616028/Put/seqid=0 2024-12-13T21:33:38,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742433_1609 (size=12151) 2024-12-13T21:33:38,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:38,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125678595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:38,942 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/621c49236cda464da71c08d0e35b459b 2024-12-13T21:33:38,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/b708f3ad96dd489784abd6ecf4ff2d42 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/b708f3ad96dd489784abd6ecf4ff2d42 2024-12-13T21:33:38,947 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/b708f3ad96dd489784abd6ecf4ff2d42, entries=150, sequenceid=157, filesize=30.4 K 2024-12-13T21:33:38,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/ec83d509f0a844a8a951e52e57a2a914 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/ec83d509f0a844a8a951e52e57a2a914 2024-12-13T21:33:38,950 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/ec83d509f0a844a8a951e52e57a2a914, entries=150, sequenceid=157, filesize=11.9 K 2024-12-13T21:33:38,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/621c49236cda464da71c08d0e35b459b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/621c49236cda464da71c08d0e35b459b 2024-12-13T21:33:38,953 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/621c49236cda464da71c08d0e35b459b, entries=150, sequenceid=157, filesize=11.9 K 2024-12-13T21:33:38,954 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for c4e9541490a67306648a9c57a40aab49 in 1690ms, sequenceid=157, compaction requested=false 2024-12-13T21:33:38,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:38,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:38,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-12-13T21:33:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-12-13T21:33:38,956 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-13T21:33:38,956 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7540 sec 2024-12-13T21:33:38,957 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 2.7580 sec 2024-12-13T21:33:39,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:39,606 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-13T21:33:39,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:39,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:39,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:39,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:39,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:39,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:39,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213b0fce5cec6fe479fb262b3c0b82012d2_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_0/A:col10/1734125617479/Put/seqid=0 2024-12-13T21:33:39,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742434_1610 (size=12304) 2024-12-13T21:33:39,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:39,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125679648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:39,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:39,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125679751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:39,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:39,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125679954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:40,020 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:40,022 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213b0fce5cec6fe479fb262b3c0b82012d2_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b0fce5cec6fe479fb262b3c0b82012d2_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:40,023 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/3cb2c648033748af81cc307b95d7984b, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:40,023 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/3cb2c648033748af81cc307b95d7984b is 175, key is test_row_0/A:col10/1734125617479/Put/seqid=0 2024-12-13T21:33:40,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742435_1611 (size=31105) 2024-12-13T21:33:40,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:40,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125680150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:40,151 DEBUG [Thread-2507 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4122 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:33:40,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:40,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125680160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:40,161 DEBUG [Thread-2505 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:33:40,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:40,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125680163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:40,164 DEBUG [Thread-2503 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:33:40,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:40,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125680165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:40,166 DEBUG [Thread-2511 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:33:40,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:40,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125680256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:40,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T21:33:40,304 INFO [Thread-2513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-12-13T21:33:40,305 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:40,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-12-13T21:33:40,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-13T21:33:40,306 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:40,306 INFO [PEWorker-3 {}] 
procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:40,306 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:40,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-13T21:33:40,426 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/3cb2c648033748af81cc307b95d7984b 2024-12-13T21:33:40,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/359eee6d92764863bf62352b19670e18 is 50, key is test_row_0/B:col10/1734125617479/Put/seqid=0 2024-12-13T21:33:40,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742436_1612 (size=12151) 2024-12-13T21:33:40,457 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:40,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-13T21:33:40,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:40,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:40,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:40,458 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:40,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:40,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:40,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-13T21:33:40,609 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:40,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-13T21:33:40,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:40,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:40,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:40,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
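Note: the pid=175/176 records above show the master's FlushTableProcedure dispatching FlushRegionCallable to the region server, which answers "NOT flushing ... as already flushing" and reports the flush as incomplete, so the subprocedure is retried. On the client side this corresponds to a table flush requested through the Admin API (the log shows procId 173 completing and pid=175 being stored right after). A minimal sketch of issuing such a flush, assuming a reachable cluster and the TestAcidGuarantees table from this log (not the test tool's actual code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush the table and waits for the operation to finish;
      // in this log that request is executed as a FlushTableProcedure (pid=175) whose
      // per-region subprocedure (pid=176) is retried while the region is already flushing.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}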
2024-12-13T21:33:40,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:40,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:40,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:40,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125680761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:40,761 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:40,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-13T21:33:40,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:40,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:40,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
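Note: the recurring RegionTooBusyException ("Over memstore limit=512.0 K") is the region server's write back-pressure: once a region's memstore exceeds the flush size times the blocking multiplier (the test presumably configures a small flush size), puts are rejected until a flush catches up, and the client's RpcRetryingCallerImpl keeps retrying ("tries=6, retries=16" above). A minimal client-side sketch, assuming the same TestAcidGuarantees table, the B/col10 column seen in this log, and illustrative configuration values:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreBackpressureSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry knobs behind the "tries=..., retries=..." lines above (values illustrative).
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100); // ms between retries, scaled by a backoff table
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // Retries happen inside the client (HTable.put -> RpcRetryingCallerImpl, as in the
        // stack traces above); the call only fails here once the retry budget is spent.
        table.put(put);
      } catch (IOException e) {
        // Typically surfaces the RegionTooBusyException seen in this log (possibly wrapped
        // in a retries-exhausted exception). The server-side limit is governed by
        // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier;
        // back off and retry later, or wait for the region's flush to complete.
      }
    }
  }
}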
2024-12-13T21:33:40,762 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:40,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:40,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:40,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/359eee6d92764863bf62352b19670e18 2024-12-13T21:33:40,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/736c7c78824e4bc09ab395a11d4ac597 is 50, key is test_row_0/C:col10/1734125617479/Put/seqid=0 2024-12-13T21:33:40,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742437_1613 (size=12151) 2024-12-13T21:33:40,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-13T21:33:40,913 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:40,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-13T21:33:40,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:40,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
as already flushing 2024-12-13T21:33:40,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:40,914 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:40,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:40,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:41,065 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:41,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-13T21:33:41,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:41,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:41,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:41,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:41,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:41,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:41,217 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:41,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-13T21:33:41,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:41,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:41,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:41,218 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:41,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:41,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:41,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/736c7c78824e4bc09ab395a11d4ac597 2024-12-13T21:33:41,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/3cb2c648033748af81cc307b95d7984b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/3cb2c648033748af81cc307b95d7984b 2024-12-13T21:33:41,247 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/3cb2c648033748af81cc307b95d7984b, entries=150, sequenceid=175, filesize=30.4 K 2024-12-13T21:33:41,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/359eee6d92764863bf62352b19670e18 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/359eee6d92764863bf62352b19670e18 2024-12-13T21:33:41,250 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/359eee6d92764863bf62352b19670e18, entries=150, 
sequenceid=175, filesize=11.9 K 2024-12-13T21:33:41,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/736c7c78824e4bc09ab395a11d4ac597 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/736c7c78824e4bc09ab395a11d4ac597 2024-12-13T21:33:41,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/736c7c78824e4bc09ab395a11d4ac597, entries=150, sequenceid=175, filesize=11.9 K 2024-12-13T21:33:41,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for c4e9541490a67306648a9c57a40aab49 in 1648ms, sequenceid=175, compaction requested=true 2024-12-13T21:33:41,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:41,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:41,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:41,254 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:41,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:41,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:41,254 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:41,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:41,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:41,255 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:41,255 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93623 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:41,255 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] 
regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/B is initiating minor compaction (all files) 2024-12-13T21:33:41,255 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/A is initiating minor compaction (all files) 2024-12-13T21:33:41,255 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/B in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:41,255 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/A in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:41,255 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/7b7c86d4aeed42168c631d4c443b982f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/b708f3ad96dd489784abd6ecf4ff2d42, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/3cb2c648033748af81cc307b95d7984b] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=91.4 K 2024-12-13T21:33:41,255 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/142dcf1a3f884b30aeb58b783668e0f5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/ec83d509f0a844a8a951e52e57a2a914, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/359eee6d92764863bf62352b19670e18] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=35.9 K 2024-12-13T21:33:41,255 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:41,255 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/7b7c86d4aeed42168c631d4c443b982f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/b708f3ad96dd489784abd6ecf4ff2d42, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/3cb2c648033748af81cc307b95d7984b] 2024-12-13T21:33:41,256 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 142dcf1a3f884b30aeb58b783668e0f5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734125615403 2024-12-13T21:33:41,256 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b7c86d4aeed42168c631d4c443b982f, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734125615403 2024-12-13T21:33:41,256 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting ec83d509f0a844a8a951e52e57a2a914, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734125616028 2024-12-13T21:33:41,256 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 359eee6d92764863bf62352b19670e18, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1734125617475 2024-12-13T21:33:41,256 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting b708f3ad96dd489784abd6ecf4ff2d42, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734125616028 2024-12-13T21:33:41,256 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3cb2c648033748af81cc307b95d7984b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1734125617475 2024-12-13T21:33:41,261 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:41,262 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#B#compaction#515 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:41,262 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/aaf64375ee8149789caff946f53f1a69 is 50, key is test_row_0/B:col10/1734125617479/Put/seqid=0 2024-12-13T21:33:41,274 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121395ec09aa862347f4bfe64d84ca9549b3_c4e9541490a67306648a9c57a40aab49 store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:41,275 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121395ec09aa862347f4bfe64d84ca9549b3_c4e9541490a67306648a9c57a40aab49, store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:41,275 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121395ec09aa862347f4bfe64d84ca9549b3_c4e9541490a67306648a9c57a40aab49 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:41,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742438_1614 (size=12561) 2024-12-13T21:33:41,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742439_1615 (size=4469) 2024-12-13T21:33:41,370 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:41,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-13T21:33:41,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
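pid=176 is the FlushRegionProcedure dispatched for the table flush pid=175; each attempt above fails with "Unable to complete flush" because the region is still busy with the memstore flush it started on its own, so the master keeps re-dispatching the callable until the region can accept it, which it finally does in the next entry. The table flush itself is the kind of request an Admin client issues, and the later "Operation: FLUSH ... procId: 175 completed" entry shows the client waiting for the procedure to finish. A minimal sketch of that request, assuming a reachable cluster and using only the standard Admin API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a flush of every region of the table (the master stores it as a
          // FlushTableProcedure, pid=175/177 in this log) and waits for it to finish.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }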
2024-12-13T21:33:41,371 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-13T21:33:41,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:41,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:41,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:41,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:41,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:41,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:41,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121378e18f841eb143e3b243d947c5effb5b_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_0/A:col10/1734125619644/Put/seqid=0 2024-12-13T21:33:41,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742440_1616 (size=12304) 2024-12-13T21:33:41,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:41,381 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121378e18f841eb143e3b243d947c5effb5b_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121378e18f841eb143e3b243d947c5effb5b_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:41,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/a174fdc024e44cf5bee9601c452bccd6, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:41,382 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/a174fdc024e44cf5bee9601c452bccd6 is 175, key is test_row_0/A:col10/1734125619644/Put/seqid=0 2024-12-13T21:33:41,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742441_1617 (size=31105) 2024-12-13T21:33:41,385 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=196, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/a174fdc024e44cf5bee9601c452bccd6 2024-12-13T21:33:41,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/32b83a0439934032ae2e906e526efbe8 is 50, key is test_row_0/B:col10/1734125619644/Put/seqid=0 2024-12-13T21:33:41,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742442_1618 (size=12151) 2024-12-13T21:33:41,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-13T21:33:41,679 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/aaf64375ee8149789caff946f53f1a69 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/aaf64375ee8149789caff946f53f1a69 2024-12-13T21:33:41,679 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#A#compaction#516 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:41,680 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/89c6c0c01cba4f0da2ff8120b8503145 is 175, key is test_row_0/A:col10/1734125617479/Put/seqid=0 2024-12-13T21:33:41,682 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/B of c4e9541490a67306648a9c57a40aab49 into aaf64375ee8149789caff946f53f1a69(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
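Family A is the only one handled by mob.DefaultMobStoreFlusher and DefaultMobStoreCompactor while B and C go through the plain flush and compaction paths, so A is evidently declared MOB-enabled in this test (the writer logs also report 175-byte cells for A versus 50-byte cells for B and C); when no cell actually crosses the MOB threshold, the compactor simply aborts the empty MOB writer, as logged above. A hedged sketch of how such a family can be declared with the standard descriptor builders; the 100 KB threshold is an assumption for illustration, not taken from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilyExample {
      // Builds a table descriptor with one MOB-enabled family "A" and two ordinary families.
      static void createTable(Admin admin) throws java.io.IOException {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
        table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)           // oversized values go to the mobdir path seen in the log
            .setMobThreshold(100 * 1024L)  // assumed threshold, for illustration only
            .build());
        table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
        table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
        admin.createTable(table.build());
      }
    }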
2024-12-13T21:33:41,682 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:41,682 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/B, priority=13, startTime=1734125621254; duration=0sec 2024-12-13T21:33:41,683 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:41,683 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:B 2024-12-13T21:33:41,683 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:41,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742443_1619 (size=31515) 2024-12-13T21:33:41,683 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:41,683 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/C is initiating minor compaction (all files) 2024-12-13T21:33:41,683 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/C in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
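In the selection entries above, "16 blocking" is hbase.hstore.blockingStoreFiles, and "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" refers to ExploringCompactionPolicy's ratio test: a candidate set is acceptable only if no single file is larger than the compaction ratio times the combined size of the other files in the set. A simplified standalone sketch of that test, not the HBase source, using the usual 1.2 default for hbase.hstore.compaction.ratio and approximate sizes of the three C-family files being selected here:

    // Simplified version of the "files in ratio" check applied to a candidate set.
    public class InRatioCheck {
      static boolean filesInRatio(long[] sizes, double ratio) {
        long total = 0;
        for (long s : sizes) total += s;
        for (long s : sizes) {
          // No file may dwarf the rest of the set: size <= ratio * (sum of the others).
          if (s > (total - s) * ratio) return false;
        }
        return true;
      }

      public static void main(String[] args) {
        // Approximate sizes of the three C store files from the log (~12 K each, ~36 K total).
        long[] cFiles = {12500, 12150, 12150};
        System.out.println(filesInRatio(cFiles, 1.2)); // true, so all three are compacted together
      }
    }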
2024-12-13T21:33:41,683 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/c3f2bfae53d74c20a92043c38876db27, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/621c49236cda464da71c08d0e35b459b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/736c7c78824e4bc09ab395a11d4ac597] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=35.9 K 2024-12-13T21:33:41,684 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting c3f2bfae53d74c20a92043c38876db27, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734125615403 2024-12-13T21:33:41,684 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 621c49236cda464da71c08d0e35b459b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734125616028 2024-12-13T21:33:41,684 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 736c7c78824e4bc09ab395a11d4ac597, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1734125617475 2024-12-13T21:33:41,688 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#C#compaction#519 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:41,688 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/3a4000cb87a3424b9d6f1678b4a28334 is 50, key is test_row_0/C:col10/1734125617479/Put/seqid=0 2024-12-13T21:33:41,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742444_1620 (size=12561) 2024-12-13T21:33:41,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:41,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:41,793 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/32b83a0439934032ae2e906e526efbe8 2024-12-13T21:33:41,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:41,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125681793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:41,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/e7328ab0d64440aaa8499fd803334303 is 50, key is test_row_0/C:col10/1734125619644/Put/seqid=0 2024-12-13T21:33:41,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742445_1621 (size=12151) 2024-12-13T21:33:41,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:41,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125681896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:42,086 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/89c6c0c01cba4f0da2ff8120b8503145 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/89c6c0c01cba4f0da2ff8120b8503145 2024-12-13T21:33:42,089 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/A of c4e9541490a67306648a9c57a40aab49 into 89c6c0c01cba4f0da2ff8120b8503145(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:42,089 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:42,089 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/A, priority=13, startTime=1734125621254; duration=0sec 2024-12-13T21:33:42,089 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:42,089 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:A 2024-12-13T21:33:42,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:42,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125682097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:42,104 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/3a4000cb87a3424b9d6f1678b4a28334 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3a4000cb87a3424b9d6f1678b4a28334 2024-12-13T21:33:42,106 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/C of c4e9541490a67306648a9c57a40aab49 into 3a4000cb87a3424b9d6f1678b4a28334(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
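All of the above, flushes at sequenceid 175 and 196, minor compactions of A, B and C into single files, and writers bouncing off the memstore limit, happens while the test's readers keep validating rows, which is the point of an ACID-guarantees run: a row written across families A, B and C must never be observed half-updated regardless of flush and compaction timing. A minimal reader sketch in that spirit (not the actual test code), assuming writers put the same value into every column of a row:

    import java.util.Arrays;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class RowAtomicityCheck {
      // Scans the table and verifies every cell of a row carries the same value,
      // i.e. no row is seen mid-update while flushes and compactions run underneath.
      static void verify(Table table) throws java.io.IOException {
        try (ResultScanner scanner = table.getScanner(new Scan())) {
          for (Result row : scanner) {
            byte[] expected = null;
            for (Cell cell : row.rawCells()) {
              byte[] value = CellUtil.cloneValue(cell);
              if (expected == null) {
                expected = value;
              } else if (!Arrays.equals(expected, value)) {
                throw new IllegalStateException("Torn row observed: " + row);
              }
            }
          }
        }
      }
    }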
2024-12-13T21:33:42,106 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:42,106 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/C, priority=13, startTime=1734125621254; duration=0sec 2024-12-13T21:33:42,106 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:42,106 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:C 2024-12-13T21:33:42,202 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/e7328ab0d64440aaa8499fd803334303 2024-12-13T21:33:42,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/a174fdc024e44cf5bee9601c452bccd6 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/a174fdc024e44cf5bee9601c452bccd6 2024-12-13T21:33:42,207 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/a174fdc024e44cf5bee9601c452bccd6, entries=150, sequenceid=196, filesize=30.4 K 2024-12-13T21:33:42,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/32b83a0439934032ae2e906e526efbe8 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/32b83a0439934032ae2e906e526efbe8 2024-12-13T21:33:42,210 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/32b83a0439934032ae2e906e526efbe8, entries=150, sequenceid=196, filesize=11.9 K 2024-12-13T21:33:42,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/e7328ab0d64440aaa8499fd803334303 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/e7328ab0d64440aaa8499fd803334303 2024-12-13T21:33:42,213 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/e7328ab0d64440aaa8499fd803334303, entries=150, sequenceid=196, filesize=11.9 K 2024-12-13T21:33:42,213 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for c4e9541490a67306648a9c57a40aab49 in 842ms, sequenceid=196, compaction requested=false 2024-12-13T21:33:42,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:42,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:42,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-13T21:33:42,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-12-13T21:33:42,215 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-12-13T21:33:42,215 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9080 sec 2024-12-13T21:33:42,216 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 1.9100 sec 2024-12-13T21:33:42,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:42,403 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-13T21:33:42,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:42,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:42,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:42,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:42,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:42,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:42,408 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213329f341397b0480197e6296678e26e8c_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_0/A:col10/1734125621789/Put/seqid=0 2024-12-13T21:33:42,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-13T21:33:42,409 INFO [Thread-2513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-12-13T21:33:42,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-12-13T21:33:42,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-12-13T21:33:42,411 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-13T21:33:42,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-13T21:33:42,411 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T21:33:42,412 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T21:33:42,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742446_1622 (size=12304) 2024-12-13T21:33:42,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:42,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125682444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:42,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-13T21:33:42,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:42,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125682547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:42,563 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:42,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-13T21:33:42,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:42,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:42,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:42,564 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
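Editor's note: the "Over memstore limit=512.0 K" figure in these exceptions is the per-region blocking size, computed as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below only demonstrates that arithmetic; the 128 KB flush size is an assumption chosen to match the 512 KB limit reported here (the shipped defaults are 128 MB and a multiplier of 4), not a recommended setting.

```java
// Sketch: where the blocking threshold behind RegionTooBusyException comes from.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);  // assumed small flush size (test-like)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // default multiplier

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingSize = flushSize * multiplier;                      // 512 KB with the values above
    System.out.println("Writes are rejected (RegionTooBusyException) above " + blockingSize + " bytes");
  }
}
```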
2024-12-13T21:33:42,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:42,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:42,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-13T21:33:42,715 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:42,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-13T21:33:42,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:42,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:42,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:42,716 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:42,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:42,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:42,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:42,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125682751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:42,813 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:42,815 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213329f341397b0480197e6296678e26e8c_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213329f341397b0480197e6296678e26e8c_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:42,816 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/93420021fdbd4ed2adf97d664b5bf04a, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:42,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/93420021fdbd4ed2adf97d664b5bf04a is 175, key is test_row_0/A:col10/1734125621789/Put/seqid=0 2024-12-13T21:33:42,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742447_1623 (size=31105) 2024-12-13T21:33:42,867 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
fd052dae32be,38989,1734125418878 2024-12-13T21:33:42,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-13T21:33:42,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:42,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:42,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:42,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:42,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-13T21:33:43,019 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:43,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-13T21:33:43,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:43,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:43,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:43,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
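Editor's note: the repeating pid=178 failures follow a consistent pattern in the log. The master's FlushTableProcedure (pid=177) dispatches a FlushRegionProcedure to the region server, the server reports "NOT flushing ... as already flushing" because the MemStoreFlusher already has a flush in flight for this region, the callable fails with "Unable to complete flush", and the master re-dispatches it until that flush finishes. The sketch below shows the kind of client call that starts this procedure chain, mirroring the "Client=jenkins ... flush TestAcidGuarantees" entry earlier in the log; connection setup and error handling are reduced to a minimum for illustration.

```java
// Sketch: requesting a table flush, which the master turns into a FlushTableProcedure
// plus one FlushRegionProcedure per region, as seen in the log above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits the flush to the master and waits for the procedure to complete,
      // matching the "Operation: FLUSH ... procId: ... completed" entries in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```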
2024-12-13T21:33:43,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:43,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125683053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:43,171 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:43,172 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-13T21:33:43,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:43,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:43,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:43,172 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T21:33:43,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,219 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=215, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/93420021fdbd4ed2adf97d664b5bf04a 2024-12-13T21:33:43,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/94d41941659847c4a0a2c77366e4d5b2 is 50, key is test_row_0/B:col10/1734125621789/Put/seqid=0 2024-12-13T21:33:43,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742448_1624 (size=12151) 2024-12-13T21:33:43,324 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:43,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-13T21:33:43,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:43,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:43,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:43,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
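Editor's note: the HMobStore rename under mobdir/.tmp and the DefaultMobStoreFlusher entries above indicate that column family A is MOB-enabled in this test, so flushed values are split between a MOB file and a regular store file. The sketch below shows roughly how such a family is declared with the HBase 2.x descriptor builders; the 10 KB threshold and the single-family table layout are illustrative assumptions, not the test's actual settings.

```java
// Sketch: declaring a MOB-enabled column family like the "A" family flushed above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)           // store large cells as MOB files under the mob directory
          .setMobThreshold(10 * 1024L)   // cells above ~10 KB go to MOB (assumed value)
          .build();
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(mobFamily)
          .build());
    }
  }
}
```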
2024-12-13T21:33:43,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,476 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:43,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-13T21:33:43,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:43,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:43,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:43,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-13T21:33:43,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:43,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125683560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:43,626 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/94d41941659847c4a0a2c77366e4d5b2 2024-12-13T21:33:43,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:43,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-13T21:33:43,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:43,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:43,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:43,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/17cb7d5f8b164f508c7e70810db3e790 is 50, key is test_row_0/C:col10/1734125621789/Put/seqid=0 2024-12-13T21:33:43,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742449_1625 (size=12151) 2024-12-13T21:33:43,780 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:43,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-13T21:33:43,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:43,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:43,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
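The records above show the master's RSProcedureDispatcher repeatedly sending FlushRegionCallable (pid=178) to fd052dae32be,38989 and the region server rejecting it with "Unable to complete flush ... as already flushing" until the in-progress flush completes. For reference, a table flush of this kind can be requested from a client through the public Admin API; the sketch below is illustrative only (the class name is an assumption, the table name is taken from the log) and is not the code the test harness itself runs.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Ask the master to flush every region of the table; the master drives
    // this by dispatching flush work to the region servers, which is the
    // retry loop visible in the log records above.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}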
2024-12-13T21:33:43,781 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,932 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:43,933 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-13T21:33:43,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:43,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:43,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:43,933 ERROR [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:43,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T21:33:44,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/17cb7d5f8b164f508c7e70810db3e790 2024-12-13T21:33:44,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/93420021fdbd4ed2adf97d664b5bf04a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/93420021fdbd4ed2adf97d664b5bf04a 2024-12-13T21:33:44,040 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/93420021fdbd4ed2adf97d664b5bf04a, entries=150, sequenceid=215, filesize=30.4 K 2024-12-13T21:33:44,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/94d41941659847c4a0a2c77366e4d5b2 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/94d41941659847c4a0a2c77366e4d5b2 2024-12-13T21:33:44,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/94d41941659847c4a0a2c77366e4d5b2, entries=150, 
sequenceid=215, filesize=11.9 K 2024-12-13T21:33:44,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/17cb7d5f8b164f508c7e70810db3e790 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/17cb7d5f8b164f508c7e70810db3e790 2024-12-13T21:33:44,046 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/17cb7d5f8b164f508c7e70810db3e790, entries=150, sequenceid=215, filesize=11.9 K 2024-12-13T21:33:44,047 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for c4e9541490a67306648a9c57a40aab49 in 1644ms, sequenceid=215, compaction requested=true 2024-12-13T21:33:44,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:44,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:44,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:44,047 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:44,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:44,047 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:44,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:44,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:44,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:44,048 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:44,048 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:44,048 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] 
regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/A is initiating minor compaction (all files) 2024-12-13T21:33:44,048 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/B is initiating minor compaction (all files) 2024-12-13T21:33:44,048 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/A in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:44,048 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/B in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:44,048 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/aaf64375ee8149789caff946f53f1a69, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/32b83a0439934032ae2e906e526efbe8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/94d41941659847c4a0a2c77366e4d5b2] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=36.0 K 2024-12-13T21:33:44,048 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/89c6c0c01cba4f0da2ff8120b8503145, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/a174fdc024e44cf5bee9601c452bccd6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/93420021fdbd4ed2adf97d664b5bf04a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=91.5 K 2024-12-13T21:33:44,048 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:44,048 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/89c6c0c01cba4f0da2ff8120b8503145, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/a174fdc024e44cf5bee9601c452bccd6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/93420021fdbd4ed2adf97d664b5bf04a] 2024-12-13T21:33:44,048 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting aaf64375ee8149789caff946f53f1a69, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1734125617475 2024-12-13T21:33:44,048 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89c6c0c01cba4f0da2ff8120b8503145, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1734125617475 2024-12-13T21:33:44,049 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 32b83a0439934032ae2e906e526efbe8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1734125619639 2024-12-13T21:33:44,049 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting a174fdc024e44cf5bee9601c452bccd6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1734125619639 2024-12-13T21:33:44,049 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 94d41941659847c4a0a2c77366e4d5b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734125621787 2024-12-13T21:33:44,049 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93420021fdbd4ed2adf97d664b5bf04a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734125621787 2024-12-13T21:33:44,053 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:44,054 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#B#compaction#524 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:44,054 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/5601c8d921a64db2b66f09d8d01223bc is 50, key is test_row_0/B:col10/1734125621789/Put/seqid=0 2024-12-13T21:33:44,056 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121368c665111da74bfd89c1e1875232806a_c4e9541490a67306648a9c57a40aab49 store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:44,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742450_1626 (size=12663) 2024-12-13T21:33:44,057 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121368c665111da74bfd89c1e1875232806a_c4e9541490a67306648a9c57a40aab49, store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:44,057 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121368c665111da74bfd89c1e1875232806a_c4e9541490a67306648a9c57a40aab49 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:44,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742451_1627 (size=4469) 2024-12-13T21:33:44,060 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#A#compaction#525 average throughput is 3.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:44,060 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/8335bf33252741ca864c05ad5ac93bb3 is 175, key is test_row_0/A:col10/1734125621789/Put/seqid=0 2024-12-13T21:33:44,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742452_1628 (size=31617) 2024-12-13T21:33:44,085 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38989 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-13T21:33:44,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
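The ExploringCompactionPolicy lines above report that three store files were selected "with 1 in ratio". The check being referred to is a size-ratio test over each candidate selection; the sketch below is a simplified stand-alone version of that test (class and method names are mine, and 1.2 is assumed as the commonly used default compaction ratio), fed with sizes close to the three store-A files listed in the log.

import java.util.List;

public class RatioCheckSketch {
  // Simplified "in ratio" test: every file in the selection must be no larger
  // than `ratio` times the combined size of the other selected files.
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximations of the three store-A file sizes from the log
    // (~30.8 K, ~30.4 K, ~30.4 K, totalling 93725 bytes).
    System.out.println(filesInRatio(List.of(31537L, 31094L, 31094L), 1.2)); // true
  }
}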
2024-12-13T21:33:44,085 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-13T21:33:44,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:44,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:44,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:44,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:44,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:44,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:44,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213f307ad89d0884cd6b7114b173ddc0372_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_0/A:col10/1734125622441/Put/seqid=0 2024-12-13T21:33:44,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742453_1629 (size=12304) 2024-12-13T21:33:44,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:44,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. as already flushing 2024-12-13T21:33:44,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125684195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125684195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125684196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48956 deadline: 1734125684202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,203 DEBUG [Thread-2503 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8173 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., hostname=fd052dae32be,38989,1734125418878, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-13T21:33:44,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125684297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125684297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125684298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,460 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/5601c8d921a64db2b66f09d8d01223bc as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/5601c8d921a64db2b66f09d8d01223bc 2024-12-13T21:33:44,463 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/B of c4e9541490a67306648a9c57a40aab49 into 5601c8d921a64db2b66f09d8d01223bc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
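The burst of RegionTooBusyException entries above ("Over memstore limit=512.0 K") is the region server deliberately rejecting new writes while the in-flight flush drains the region's memstore; the HBase client retries such rejections internally before giving up. Below is a minimal sketch of how an application could layer its own backoff on top of that built-in retry. It is not part of the test: the class name, row, and value are hypothetical, and only the table and family names echo the log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);           // the client itself already retries RegionTooBusyException
          break;                    // write accepted once the flush has drained the memstore
        } catch (IOException e) {   // thrown once the client's own retries are exhausted
          if (attempt == 5) throw e;
          Thread.sleep(backoffMs);  // back off so the region server can finish flushing
          backoffMs *= 2;
        }
      }
    }
  }
}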
2024-12-13T21:33:44,463 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:44,463 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/B, priority=13, startTime=1734125624047; duration=0sec 2024-12-13T21:33:44,463 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:44,463 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:B 2024-12-13T21:33:44,463 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:44,464 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:44,464 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/C is initiating minor compaction (all files) 2024-12-13T21:33:44,464 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/C in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:44,464 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3a4000cb87a3424b9d6f1678b4a28334, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/e7328ab0d64440aaa8499fd803334303, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/17cb7d5f8b164f508c7e70810db3e790] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=36.0 K 2024-12-13T21:33:44,464 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a4000cb87a3424b9d6f1678b4a28334, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1734125617475 2024-12-13T21:33:44,464 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting e7328ab0d64440aaa8499fd803334303, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1734125619639 2024-12-13T21:33:44,464 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 17cb7d5f8b164f508c7e70810db3e790, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734125621787 2024-12-13T21:33:44,466 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/8335bf33252741ca864c05ad5ac93bb3 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/8335bf33252741ca864c05ad5ac93bb3 2024-12-13T21:33:44,469 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/A of c4e9541490a67306648a9c57a40aab49 into 8335bf33252741ca864c05ad5ac93bb3(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:44,469 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#C#compaction#527 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:44,469 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:44,469 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/A, priority=13, startTime=1734125624047; duration=0sec 2024-12-13T21:33:44,469 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:44,469 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:A 2024-12-13T21:33:44,469 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/d79b8537eb2f42b0a3d070f1ce92392c is 50, key is test_row_0/C:col10/1734125621789/Put/seqid=0 2024-12-13T21:33:44,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742454_1630 (size=12663) 2024-12-13T21:33:44,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:44,495 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213f307ad89d0884cd6b7114b173ddc0372_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213f307ad89d0884cd6b7114b173ddc0372_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:44,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/9a6aa1f0359040b58cb8c01d9a547e04, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:44,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/9a6aa1f0359040b58cb8c01d9a547e04 is 175, key is test_row_0/A:col10/1734125622441/Put/seqid=0 2024-12-13T21:33:44,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742455_1631 (size=31105) 2024-12-13T21:33:44,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125684499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125684499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125684502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-13T21:33:44,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48942 deadline: 1734125684564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125684803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125684803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:44,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125684805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:44,875 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/d79b8537eb2f42b0a3d070f1ce92392c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/d79b8537eb2f42b0a3d070f1ce92392c 2024-12-13T21:33:44,878 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/C of c4e9541490a67306648a9c57a40aab49 into d79b8537eb2f42b0a3d070f1ce92392c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
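The selection entries above (SortedCompactionPolicy / ExploringCompactionPolicy picking 3 eligible files of roughly 12 K each) come down to a size-ratio test: a group of store files is only compacted together if no single file dwarfs the rest. The sketch below illustrates that check in isolation; it is a simplification, not the actual org.apache.hadoop.hbase compaction code, and the 1.2 ratio is the commonly cited default for hbase.hstore.compaction.ratio, assumed here rather than read from the test.

import java.util.List;

/** Simplified stand-in for the size-ratio check behind "Exploring compaction algorithm
 *  has selected 3 files ..."; not the real HBase implementation. */
public class CompactionRatioSketch {

  // A candidate set passes if every file is at most ratio times the combined
  // size of the other files in the set.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three ~12 KB store files, as in the log: each is far below 1.2x the other two,
    // so the whole window is compacted together.
    System.out.println(filesInRatio(List.of(12_663L, 12_151L, 12_151L), 1.2)); // true
    // A 300 KB file next to two small ones breaks the ratio and would not be grouped.
    System.out.println(filesInRatio(List.of(300_000L, 12_151L, 12_151L), 1.2)); // false
  }
}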
2024-12-13T21:33:44,878 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:44,878 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/C, priority=13, startTime=1734125624047; duration=0sec 2024-12-13T21:33:44,878 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:44,878 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:C 2024-12-13T21:33:44,900 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=235, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/9a6aa1f0359040b58cb8c01d9a547e04 2024-12-13T21:33:44,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/beed98573aea42ee8f3a4857be4abc75 is 50, key is test_row_0/B:col10/1734125622441/Put/seqid=0 2024-12-13T21:33:44,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742456_1632 (size=12151) 2024-12-13T21:33:45,308 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/beed98573aea42ee8f3a4857be4abc75 2024-12-13T21:33:45,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:45,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48922 deadline: 1734125685310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:45,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:45,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48962 deadline: 1734125685310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:45,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T21:33:45,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:48910 deadline: 1734125685310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 2024-12-13T21:33:45,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/1383499e8d324926be385ddd93eed678 is 50, key is test_row_0/C:col10/1734125622441/Put/seqid=0 2024-12-13T21:33:45,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742457_1633 (size=12151) 2024-12-13T21:33:45,337 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/1383499e8d324926be385ddd93eed678 2024-12-13T21:33:45,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/9a6aa1f0359040b58cb8c01d9a547e04 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/9a6aa1f0359040b58cb8c01d9a547e04 2024-12-13T21:33:45,343 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/9a6aa1f0359040b58cb8c01d9a547e04, entries=150, sequenceid=235, filesize=30.4 K 2024-12-13T21:33:45,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/beed98573aea42ee8f3a4857be4abc75 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/beed98573aea42ee8f3a4857be4abc75 2024-12-13T21:33:45,347 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/beed98573aea42ee8f3a4857be4abc75, entries=150, sequenceid=235, filesize=11.9 K 2024-12-13T21:33:45,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/1383499e8d324926be385ddd93eed678 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/1383499e8d324926be385ddd93eed678 2024-12-13T21:33:45,351 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/1383499e8d324926be385ddd93eed678, entries=150, sequenceid=235, filesize=11.9 K 2024-12-13T21:33:45,351 INFO [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for c4e9541490a67306648a9c57a40aab49 in 1266ms, sequenceid=235, compaction requested=false 2024-12-13T21:33:45,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:45,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
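The flush that completes above (~114 KB written across A, B and C at sequenceid=235) is what clears the "Over memstore limit=512.0 K" condition: a region blocks updates once its memstore exceeds the flush size multiplied by the block multiplier, and unblocks after the flush. The snippet below shows one plausible way such a small blocking limit could be configured; the settings actually used by this test are not visible in the excerpt, so the concrete values are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values: a 128 KB flush size with the default multiplier of 4 gives the
    // 512 KB blocking limit reported above; the test's real settings are not shown here.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Once a region's memstore grows past flushSize * multiplier, puts fail with
    // RegionTooBusyException until a flush brings it back down.
    System.out.println("blocking limit = " + flushSize * multiplier + " bytes"); // 524288
  }
}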
2024-12-13T21:33:45,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fd052dae32be:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-13T21:33:45,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-12-13T21:33:45,353 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-12-13T21:33:45,354 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9400 sec 2024-12-13T21:33:45,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 2.9440 sec 2024-12-13T21:33:45,881 DEBUG [Thread-2514 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51fccca6 to 127.0.0.1:57927 2024-12-13T21:33:45,881 DEBUG [Thread-2516 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x539997ae to 127.0.0.1:57927 2024-12-13T21:33:45,881 DEBUG [Thread-2514 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:45,881 DEBUG [Thread-2516 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:45,881 DEBUG [Thread-2522 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4a684cd4 to 127.0.0.1:57927 2024-12-13T21:33:45,881 DEBUG [Thread-2522 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:45,882 DEBUG [Thread-2518 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65b56307 to 127.0.0.1:57927 2024-12-13T21:33:45,882 DEBUG [Thread-2518 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:45,882 DEBUG [Thread-2520 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a733412 to 127.0.0.1:57927 2024-12-13T21:33:45,882 DEBUG [Thread-2520 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:46,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38989 {}] regionserver.HRegion(8581): Flush requested on c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:46,318 DEBUG [Thread-2505 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31f7e171 to 127.0.0.1:57927 2024-12-13T21:33:46,318 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-13T21:33:46,318 DEBUG [Thread-2505 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:46,319 DEBUG [Thread-2507 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6c078737 to 127.0.0.1:57927 2024-12-13T21:33:46,319 DEBUG [Thread-2507 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:46,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:46,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:46,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:46,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:46,319 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:46,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:46,320 DEBUG [Thread-2511 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76670256 to 127.0.0.1:57927 2024-12-13T21:33:46,320 DEBUG [Thread-2511 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:46,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213ccc5df23660146f2ba44e28c37321509_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_0/A:col10/1734125626317/Put/seqid=0 2024-12-13T21:33:46,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742458_1634 (size=12304) 2024-12-13T21:33:46,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-13T21:33:46,516 INFO [Thread-2513 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-12-13T21:33:46,570 DEBUG [Thread-2509 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7bf8843a to 127.0.0.1:57927 2024-12-13T21:33:46,570 DEBUG [Thread-2509 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:46,729 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:46,732 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213ccc5df23660146f2ba44e28c37321509_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213ccc5df23660146f2ba44e28c37321509_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:46,733 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/17b67cde4a0a40d599148df69a06fdb6, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:46,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/17b67cde4a0a40d599148df69a06fdb6 is 175, key is test_row_0/A:col10/1734125626317/Put/seqid=0 2024-12-13T21:33:46,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742459_1635 (size=31105) 2024-12-13T21:33:47,140 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=256, memsize=33.5 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/17b67cde4a0a40d599148df69a06fdb6 2024-12-13T21:33:47,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/6a10565b5cff453db2866e21d91523cf is 50, key is test_row_0/B:col10/1734125626317/Put/seqid=0 2024-12-13T21:33:47,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742460_1636 (size=12151) 2024-12-13T21:33:47,355 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T21:33:47,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/6a10565b5cff453db2866e21d91523cf 2024-12-13T21:33:47,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/341e7015266e4ec79e79945e49512655 is 50, key is test_row_0/C:col10/1734125626317/Put/seqid=0 2024-12-13T21:33:47,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742461_1637 (size=12151) 2024-12-13T21:33:47,989 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/341e7015266e4ec79e79945e49512655 2024-12-13T21:33:47,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/17b67cde4a0a40d599148df69a06fdb6 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/17b67cde4a0a40d599148df69a06fdb6 2024-12-13T21:33:48,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/17b67cde4a0a40d599148df69a06fdb6, entries=150, sequenceid=256, filesize=30.4 K 2024-12-13T21:33:48,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/6a10565b5cff453db2866e21d91523cf as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/6a10565b5cff453db2866e21d91523cf 2024-12-13T21:33:48,007 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/6a10565b5cff453db2866e21d91523cf, entries=150, sequenceid=256, filesize=11.9 K 2024-12-13T21:33:48,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/341e7015266e4ec79e79945e49512655 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/341e7015266e4ec79e79945e49512655 2024-12-13T21:33:48,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/341e7015266e4ec79e79945e49512655, entries=150, sequenceid=256, filesize=11.9 K 2024-12-13T21:33:48,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=13.42 KB/13740 for c4e9541490a67306648a9c57a40aab49 in 1694ms, sequenceid=256, compaction requested=true 2024-12-13T21:33:48,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:48,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:A, priority=-2147483648, current under compaction store size is 1 2024-12-13T21:33:48,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:48,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:B, priority=-2147483648, current under compaction store size is 2 2024-12-13T21:33:48,012 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:48,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:48,012 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:48,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c4e9541490a67306648a9c57a40aab49:C, priority=-2147483648, current under compaction store size is 3 2024-12-13T21:33:48,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:48,013 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93827 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:48,013 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:48,013 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/A is initiating minor compaction (all files) 2024-12-13T21:33:48,013 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/B is initiating minor compaction (all files) 2024-12-13T21:33:48,013 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/A in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:48,013 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/B in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:48,014 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/8335bf33252741ca864c05ad5ac93bb3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/9a6aa1f0359040b58cb8c01d9a547e04, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/17b67cde4a0a40d599148df69a06fdb6] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=91.6 K 2024-12-13T21:33:48,014 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/5601c8d921a64db2b66f09d8d01223bc, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/beed98573aea42ee8f3a4857be4abc75, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/6a10565b5cff453db2866e21d91523cf] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=36.1 K 2024-12-13T21:33:48,014 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:48,014 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
files: [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/8335bf33252741ca864c05ad5ac93bb3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/9a6aa1f0359040b58cb8c01d9a547e04, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/17b67cde4a0a40d599148df69a06fdb6] 2024-12-13T21:33:48,014 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 5601c8d921a64db2b66f09d8d01223bc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734125621787 2024-12-13T21:33:48,014 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8335bf33252741ca864c05ad5ac93bb3, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734125621787 2024-12-13T21:33:48,014 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting beed98573aea42ee8f3a4857be4abc75, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1734125622439 2024-12-13T21:33:48,014 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a6aa1f0359040b58cb8c01d9a547e04, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1734125622439 2024-12-13T21:33:48,015 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a10565b5cff453db2866e21d91523cf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1734125624192 2024-12-13T21:33:48,015 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17b67cde4a0a40d599148df69a06fdb6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1734125624192 2024-12-13T21:33:48,020 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:48,020 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#B#compaction#533 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:48,021 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/265b4e22e38b49e7b25983ad7d3061c9 is 50, key is test_row_0/B:col10/1734125626317/Put/seqid=0 2024-12-13T21:33:48,021 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241213bc6a1991b4b74b72b2d44f73f87d5593_c4e9541490a67306648a9c57a40aab49 store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:48,023 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241213bc6a1991b4b74b72b2d44f73f87d5593_c4e9541490a67306648a9c57a40aab49, store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:48,023 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241213bc6a1991b4b74b72b2d44f73f87d5593_c4e9541490a67306648a9c57a40aab49 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:48,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742462_1638 (size=12765) 2024-12-13T21:33:48,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742463_1639 (size=4469) 2024-12-13T21:33:48,429 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#A#compaction#534 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:48,430 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/78e81a69d24b40e6b357dc4cc2741389 is 175, key is test_row_0/A:col10/1734125626317/Put/seqid=0 2024-12-13T21:33:48,435 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/265b4e22e38b49e7b25983ad7d3061c9 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/265b4e22e38b49e7b25983ad7d3061c9 2024-12-13T21:33:48,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742464_1640 (size=31719) 2024-12-13T21:33:48,440 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/B of c4e9541490a67306648a9c57a40aab49 into 265b4e22e38b49e7b25983ad7d3061c9(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:48,440 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:48,440 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/B, priority=13, startTime=1734125628012; duration=0sec 2024-12-13T21:33:48,440 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-13T21:33:48,440 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:B 2024-12-13T21:33:48,440 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T21:33:48,441 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T21:33:48,441 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1540): c4e9541490a67306648a9c57a40aab49/C is initiating minor compaction (all files) 2024-12-13T21:33:48,441 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c4e9541490a67306648a9c57a40aab49/C in TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:48,441 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/d79b8537eb2f42b0a3d070f1ce92392c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/1383499e8d324926be385ddd93eed678, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/341e7015266e4ec79e79945e49512655] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp, totalSize=36.1 K 2024-12-13T21:33:48,441 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting d79b8537eb2f42b0a3d070f1ce92392c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734125621787 2024-12-13T21:33:48,442 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 1383499e8d324926be385ddd93eed678, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1734125622439 2024-12-13T21:33:48,442 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] compactions.Compactor(224): Compacting 341e7015266e4ec79e79945e49512655, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1734125624192 2024-12-13T21:33:48,447 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c4e9541490a67306648a9c57a40aab49#C#compaction#535 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T21:33:48,448 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/349ff9af19b94f6d8be6c22f7a7cec26 is 50, key is test_row_0/C:col10/1734125626317/Put/seqid=0 2024-12-13T21:33:48,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742465_1641 (size=12765) 2024-12-13T21:33:48,844 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/78e81a69d24b40e6b357dc4cc2741389 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/78e81a69d24b40e6b357dc4cc2741389 2024-12-13T21:33:48,850 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/A of c4e9541490a67306648a9c57a40aab49 into 78e81a69d24b40e6b357dc4cc2741389(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T21:33:48,850 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:48,850 INFO [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/A, priority=13, startTime=1734125628012; duration=0sec 2024-12-13T21:33:48,850 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:48,850 DEBUG [RS:0;fd052dae32be:38989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:A 2024-12-13T21:33:48,855 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/349ff9af19b94f6d8be6c22f7a7cec26 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/349ff9af19b94f6d8be6c22f7a7cec26 2024-12-13T21:33:48,858 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c4e9541490a67306648a9c57a40aab49/C of c4e9541490a67306648a9c57a40aab49 into 349ff9af19b94f6d8be6c22f7a7cec26(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T21:33:48,858 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:48,858 INFO [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49., storeName=c4e9541490a67306648a9c57a40aab49/C, priority=13, startTime=1734125628013; duration=0sec 2024-12-13T21:33:48,858 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T21:33:48,858 DEBUG [RS:0;fd052dae32be:38989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c4e9541490a67306648a9c57a40aab49:C 2024-12-13T21:33:54,283 DEBUG [Thread-2503 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d688bcb to 127.0.0.1:57927 2024-12-13T21:33:54,283 DEBUG [Thread-2503 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:54,283 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-13T21:33:54,283 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 24 2024-12-13T21:33:54,283 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 23 2024-12-13T21:33:54,284 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 26 2024-12-13T21:33:54,284 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 94 2024-12-13T21:33:54,284 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 34 2024-12-13T21:33:54,284 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-13T21:33:54,284 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8954 2024-12-13T21:33:54,284 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8773 2024-12-13T21:33:54,284 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9232 2024-12-13T21:33:54,284 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8999 2024-12-13T21:33:54,284 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8774 2024-12-13T21:33:54,284 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-13T21:33:54,284 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-13T21:33:54,284 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79a7bd2b to 127.0.0.1:57927 2024-12-13T21:33:54,284 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:54,285 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-13T21:33:54,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-12-13T21:33:54,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:54,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-13T21:33:54,289 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125634288"}]},"ts":"1734125634288"} 2024-12-13T21:33:54,290 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-13T21:33:54,308 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-13T21:33:54,309 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-13T21:33:54,311 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c4e9541490a67306648a9c57a40aab49, UNASSIGN}] 2024-12-13T21:33:54,312 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c4e9541490a67306648a9c57a40aab49, UNASSIGN 2024-12-13T21:33:54,313 INFO [PEWorker-4 
{}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=c4e9541490a67306648a9c57a40aab49, regionState=CLOSING, regionLocation=fd052dae32be,38989,1734125418878 2024-12-13T21:33:54,315 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T21:33:54,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; CloseRegionProcedure c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878}] 2024-12-13T21:33:54,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-13T21:33:54,467 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to fd052dae32be,38989,1734125418878 2024-12-13T21:33:54,469 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(124): Close c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:54,469 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T21:33:54,469 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1681): Closing c4e9541490a67306648a9c57a40aab49, disabling compactions & flushes 2024-12-13T21:33:54,469 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:54,469 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 2024-12-13T21:33:54,469 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. after waiting 0 ms 2024-12-13T21:33:54,469 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:54,469 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(2837): Flushing c4e9541490a67306648a9c57a40aab49 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-13T21:33:54,470 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=A 2024-12-13T21:33:54,470 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:54,471 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=B 2024-12-13T21:33:54,471 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:54,471 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c4e9541490a67306648a9c57a40aab49, store=C 2024-12-13T21:33:54,471 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-13T21:33:54,481 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121351eb39024bf748bf9c5e1117fa52bcb2_c4e9541490a67306648a9c57a40aab49 is 50, key is test_row_1/A:col10/1734125634280/Put/seqid=0 2024-12-13T21:33:54,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742466_1642 (size=9914) 2024-12-13T21:33:54,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-13T21:33:54,888 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T21:33:54,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-13T21:33:54,896 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121351eb39024bf748bf9c5e1117fa52bcb2_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121351eb39024bf748bf9c5e1117fa52bcb2_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:54,897 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/64bd7d2feb3e4748b17605231f708451, store: [table=TestAcidGuarantees family=A region=c4e9541490a67306648a9c57a40aab49] 2024-12-13T21:33:54,898 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/64bd7d2feb3e4748b17605231f708451 is 175, key is test_row_1/A:col10/1734125634280/Put/seqid=0 2024-12-13T21:33:54,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742467_1643 (size=22561) 2024-12-13T21:33:54,902 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=265, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/64bd7d2feb3e4748b17605231f708451 2024-12-13T21:33:54,908 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/6f7114d581b24edf8580484dd53313a2 is 50, key is test_row_1/B:col10/1734125634280/Put/seqid=0 2024-12-13T21:33:54,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742468_1644 (size=9857) 2024-12-13T21:33:55,313 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/6f7114d581b24edf8580484dd53313a2 2024-12-13T21:33:55,326 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/462222e8b42148518ce6bbb51f70c571 is 50, key is test_row_1/C:col10/1734125634280/Put/seqid=0 2024-12-13T21:33:55,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742469_1645 (size=9857) 2024-12-13T21:33:55,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-13T21:33:55,732 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/462222e8b42148518ce6bbb51f70c571 2024-12-13T21:33:55,740 DEBUG 
[RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/A/64bd7d2feb3e4748b17605231f708451 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/64bd7d2feb3e4748b17605231f708451 2024-12-13T21:33:55,745 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/64bd7d2feb3e4748b17605231f708451, entries=100, sequenceid=265, filesize=22.0 K 2024-12-13T21:33:55,746 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/B/6f7114d581b24edf8580484dd53313a2 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/6f7114d581b24edf8580484dd53313a2 2024-12-13T21:33:55,750 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/6f7114d581b24edf8580484dd53313a2, entries=100, sequenceid=265, filesize=9.6 K 2024-12-13T21:33:55,751 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/.tmp/C/462222e8b42148518ce6bbb51f70c571 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/462222e8b42148518ce6bbb51f70c571 2024-12-13T21:33:55,755 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/462222e8b42148518ce6bbb51f70c571, entries=100, sequenceid=265, filesize=9.6 K 2024-12-13T21:33:55,756 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for c4e9541490a67306648a9c57a40aab49 in 1287ms, sequenceid=265, compaction requested=false 2024-12-13T21:33:55,757 DEBUG [StoreCloser-TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c0ed2f7914c5476b856ffc1e693a74f1, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/602651688b58466bb7a50b8a31164082, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e6f5cc8329014e0fa13a0f66a813087d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/29fed128636747c9ae64310bbf453c79, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/2267d3233d344fcd8f508c0ae23daf9a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e326877d7a6e486fbc13bd1e1306f6ce, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e2752f6dd3cc41109e6f8b28bd1ae58e, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c29db90341064e0e89e2c0da326b3098, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/7b7c86d4aeed42168c631d4c443b982f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/4c8daadd30a9491fb291da77f1a269b5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/b708f3ad96dd489784abd6ecf4ff2d42, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/89c6c0c01cba4f0da2ff8120b8503145, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/3cb2c648033748af81cc307b95d7984b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/a174fdc024e44cf5bee9601c452bccd6, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/8335bf33252741ca864c05ad5ac93bb3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/93420021fdbd4ed2adf97d664b5bf04a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/9a6aa1f0359040b58cb8c01d9a547e04, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/17b67cde4a0a40d599148df69a06fdb6] to archive 2024-12-13T21:33:55,758 DEBUG [StoreCloser-TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-13T21:33:55,761 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c0ed2f7914c5476b856ffc1e693a74f1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c0ed2f7914c5476b856ffc1e693a74f1 2024-12-13T21:33:55,761 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/602651688b58466bb7a50b8a31164082 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/602651688b58466bb7a50b8a31164082 2024-12-13T21:33:55,761 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e6f5cc8329014e0fa13a0f66a813087d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e6f5cc8329014e0fa13a0f66a813087d 2024-12-13T21:33:55,761 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/2267d3233d344fcd8f508c0ae23daf9a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/2267d3233d344fcd8f508c0ae23daf9a 2024-12-13T21:33:55,761 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/29fed128636747c9ae64310bbf453c79 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/29fed128636747c9ae64310bbf453c79 2024-12-13T21:33:55,762 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e2752f6dd3cc41109e6f8b28bd1ae58e to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e2752f6dd3cc41109e6f8b28bd1ae58e 2024-12-13T21:33:55,762 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e326877d7a6e486fbc13bd1e1306f6ce to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/e326877d7a6e486fbc13bd1e1306f6ce 2024-12-13T21:33:55,762 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c29db90341064e0e89e2c0da326b3098 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/c29db90341064e0e89e2c0da326b3098 2024-12-13T21:33:55,763 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/7b7c86d4aeed42168c631d4c443b982f to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/7b7c86d4aeed42168c631d4c443b982f 2024-12-13T21:33:55,763 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/4c8daadd30a9491fb291da77f1a269b5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/4c8daadd30a9491fb291da77f1a269b5 2024-12-13T21:33:55,763 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/b708f3ad96dd489784abd6ecf4ff2d42 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/b708f3ad96dd489784abd6ecf4ff2d42 2024-12-13T21:33:55,763 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/89c6c0c01cba4f0da2ff8120b8503145 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/89c6c0c01cba4f0da2ff8120b8503145 2024-12-13T21:33:55,764 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/3cb2c648033748af81cc307b95d7984b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/3cb2c648033748af81cc307b95d7984b 2024-12-13T21:33:55,764 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/a174fdc024e44cf5bee9601c452bccd6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/a174fdc024e44cf5bee9601c452bccd6 2024-12-13T21:33:55,764 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/8335bf33252741ca864c05ad5ac93bb3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/8335bf33252741ca864c05ad5ac93bb3 2024-12-13T21:33:55,764 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/93420021fdbd4ed2adf97d664b5bf04a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/93420021fdbd4ed2adf97d664b5bf04a 2024-12-13T21:33:55,764 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/9a6aa1f0359040b58cb8c01d9a547e04 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/9a6aa1f0359040b58cb8c01d9a547e04 2024-12-13T21:33:55,765 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/17b67cde4a0a40d599148df69a06fdb6 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/17b67cde4a0a40d599148df69a06fdb6 2024-12-13T21:33:55,766 DEBUG [StoreCloser-TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/2683bf9dfb4c4294b6dca3416a2fede9, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/539006ec76b045ae95da303de031a45a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/9a5dad32c0d3441e8774004f1b7c88c3, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/66cfae4cb89148ad858d69eaa72ed6f1, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/d11d7e9be7d544f0a4e47dfea22d4720, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/71865b1ae511432d90db39863012cf7a, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/a8bca196c8b64e8082cd94c841475922, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/20387cbbefd145cbaee36929cbf1eb51, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/142dcf1a3f884b30aeb58b783668e0f5, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/a16022ed7e0e4107b6f1643681fc1235, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/ec83d509f0a844a8a951e52e57a2a914, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/aaf64375ee8149789caff946f53f1a69, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/359eee6d92764863bf62352b19670e18, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/32b83a0439934032ae2e906e526efbe8, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/5601c8d921a64db2b66f09d8d01223bc, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/94d41941659847c4a0a2c77366e4d5b2, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/beed98573aea42ee8f3a4857be4abc75, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/6a10565b5cff453db2866e21d91523cf] to archive 2024-12-13T21:33:55,767 DEBUG [StoreCloser-TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-13T21:33:55,769 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/2683bf9dfb4c4294b6dca3416a2fede9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/2683bf9dfb4c4294b6dca3416a2fede9 2024-12-13T21:33:55,769 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/66cfae4cb89148ad858d69eaa72ed6f1 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/66cfae4cb89148ad858d69eaa72ed6f1 2024-12-13T21:33:55,769 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/539006ec76b045ae95da303de031a45a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/539006ec76b045ae95da303de031a45a 2024-12-13T21:33:55,769 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/71865b1ae511432d90db39863012cf7a to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/71865b1ae511432d90db39863012cf7a 2024-12-13T21:33:55,769 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/9a5dad32c0d3441e8774004f1b7c88c3 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/9a5dad32c0d3441e8774004f1b7c88c3 2024-12-13T21:33:55,769 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/20387cbbefd145cbaee36929cbf1eb51 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/20387cbbefd145cbaee36929cbf1eb51 2024-12-13T21:33:55,769 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/a8bca196c8b64e8082cd94c841475922 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/a8bca196c8b64e8082cd94c841475922 2024-12-13T21:33:55,769 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/d11d7e9be7d544f0a4e47dfea22d4720 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/d11d7e9be7d544f0a4e47dfea22d4720 2024-12-13T21:33:55,770 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/aaf64375ee8149789caff946f53f1a69 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/aaf64375ee8149789caff946f53f1a69 2024-12-13T21:33:55,770 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/142dcf1a3f884b30aeb58b783668e0f5 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/142dcf1a3f884b30aeb58b783668e0f5 2024-12-13T21:33:55,770 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/32b83a0439934032ae2e906e526efbe8 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/32b83a0439934032ae2e906e526efbe8 2024-12-13T21:33:55,770 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/5601c8d921a64db2b66f09d8d01223bc to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/5601c8d921a64db2b66f09d8d01223bc 2024-12-13T21:33:55,771 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/ec83d509f0a844a8a951e52e57a2a914 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/ec83d509f0a844a8a951e52e57a2a914 2024-12-13T21:33:55,771 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/a16022ed7e0e4107b6f1643681fc1235 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/a16022ed7e0e4107b6f1643681fc1235 2024-12-13T21:33:55,771 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/94d41941659847c4a0a2c77366e4d5b2 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/94d41941659847c4a0a2c77366e4d5b2 2024-12-13T21:33:55,771 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/359eee6d92764863bf62352b19670e18 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/359eee6d92764863bf62352b19670e18 2024-12-13T21:33:55,771 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/beed98573aea42ee8f3a4857be4abc75 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/beed98573aea42ee8f3a4857be4abc75 2024-12-13T21:33:55,771 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/6a10565b5cff453db2866e21d91523cf to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/6a10565b5cff453db2866e21d91523cf 2024-12-13T21:33:55,772 DEBUG [StoreCloser-TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3743238a46bd42dab23b43d25eeb8e5f, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/88e2c93c57a24bd89baa1d71905886f7, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/50d44a13718e47e8b35e4434dc558037, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/4f40d1c8b7ff4677bd704bf8cef90a2c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/39780e251a324aee8dd860ea9dc5a400, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/fff0829e157b4b90bda86ee5317bae6d, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3592de98662e4b5ba3c0de8652c34d92, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/584f090e7854423eaff2b74fd9ea60b7, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/c3f2bfae53d74c20a92043c38876db27, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3788fc414b584d67a65959deb341fa32, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/621c49236cda464da71c08d0e35b459b, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3a4000cb87a3424b9d6f1678b4a28334, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/736c7c78824e4bc09ab395a11d4ac597, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/e7328ab0d64440aaa8499fd803334303, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/d79b8537eb2f42b0a3d070f1ce92392c, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/17cb7d5f8b164f508c7e70810db3e790, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/1383499e8d324926be385ddd93eed678, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/341e7015266e4ec79e79945e49512655] to archive 2024-12-13T21:33:55,773 DEBUG [StoreCloser-TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-13T21:33:55,774 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/88e2c93c57a24bd89baa1d71905886f7 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/88e2c93c57a24bd89baa1d71905886f7 2024-12-13T21:33:55,774 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3743238a46bd42dab23b43d25eeb8e5f to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3743238a46bd42dab23b43d25eeb8e5f 2024-12-13T21:33:55,774 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/39780e251a324aee8dd860ea9dc5a400 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/39780e251a324aee8dd860ea9dc5a400 2024-12-13T21:33:55,774 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/50d44a13718e47e8b35e4434dc558037 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/50d44a13718e47e8b35e4434dc558037 2024-12-13T21:33:55,774 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/fff0829e157b4b90bda86ee5317bae6d to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/fff0829e157b4b90bda86ee5317bae6d 2024-12-13T21:33:55,774 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/584f090e7854423eaff2b74fd9ea60b7 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/584f090e7854423eaff2b74fd9ea60b7 2024-12-13T21:33:55,774 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/4f40d1c8b7ff4677bd704bf8cef90a2c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/4f40d1c8b7ff4677bd704bf8cef90a2c 2024-12-13T21:33:55,775 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3592de98662e4b5ba3c0de8652c34d92 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3592de98662e4b5ba3c0de8652c34d92 2024-12-13T21:33:55,775 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3a4000cb87a3424b9d6f1678b4a28334 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3a4000cb87a3424b9d6f1678b4a28334 2024-12-13T21:33:55,775 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/c3f2bfae53d74c20a92043c38876db27 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/c3f2bfae53d74c20a92043c38876db27 2024-12-13T21:33:55,775 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/736c7c78824e4bc09ab395a11d4ac597 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/736c7c78824e4bc09ab395a11d4ac597 2024-12-13T21:33:55,775 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3788fc414b584d67a65959deb341fa32 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/3788fc414b584d67a65959deb341fa32 2024-12-13T21:33:55,775 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/e7328ab0d64440aaa8499fd803334303 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/e7328ab0d64440aaa8499fd803334303 2024-12-13T21:33:55,776 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/d79b8537eb2f42b0a3d070f1ce92392c to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/d79b8537eb2f42b0a3d070f1ce92392c 2024-12-13T21:33:55,776 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/621c49236cda464da71c08d0e35b459b to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/621c49236cda464da71c08d0e35b459b 2024-12-13T21:33:55,776 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/17cb7d5f8b164f508c7e70810db3e790 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/17cb7d5f8b164f508c7e70810db3e790 2024-12-13T21:33:55,776 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/1383499e8d324926be385ddd93eed678 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/1383499e8d324926be385ddd93eed678 2024-12-13T21:33:55,776 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/341e7015266e4ec79e79945e49512655 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/341e7015266e4ec79e79945e49512655 2024-12-13T21:33:55,778 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/recovered.edits/268.seqid, newMaxSeqId=268, maxSeqId=4 2024-12-13T21:33:55,779 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49. 
2024-12-13T21:33:55,779 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1635): Region close journal for c4e9541490a67306648a9c57a40aab49: 2024-12-13T21:33:55,780 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(170): Closed c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:55,780 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=c4e9541490a67306648a9c57a40aab49, regionState=CLOSED 2024-12-13T21:33:55,782 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-12-13T21:33:55,782 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; CloseRegionProcedure c4e9541490a67306648a9c57a40aab49, server=fd052dae32be,38989,1734125418878 in 1.4660 sec 2024-12-13T21:33:55,782 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=181, resume processing ppid=180 2024-12-13T21:33:55,782 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c4e9541490a67306648a9c57a40aab49, UNASSIGN in 1.4710 sec 2024-12-13T21:33:55,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-12-13T21:33:55,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4740 sec 2024-12-13T21:33:55,784 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734125635784"}]},"ts":"1734125635784"} 2024-12-13T21:33:55,785 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-13T21:33:55,825 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-13T21:33:55,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5400 sec 2024-12-13T21:33:56,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-13T21:33:56,398 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-12-13T21:33:56,399 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-12-13T21:33:56,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:56,402 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:56,403 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=183, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:56,403 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-13T21:33:56,405 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,409 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/recovered.edits] 2024-12-13T21:33:56,413 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/64bd7d2feb3e4748b17605231f708451 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/64bd7d2feb3e4748b17605231f708451 2024-12-13T21:33:56,413 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/78e81a69d24b40e6b357dc4cc2741389 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/A/78e81a69d24b40e6b357dc4cc2741389 2024-12-13T21:33:56,416 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/6f7114d581b24edf8580484dd53313a2 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/6f7114d581b24edf8580484dd53313a2 2024-12-13T21:33:56,416 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/265b4e22e38b49e7b25983ad7d3061c9 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/B/265b4e22e38b49e7b25983ad7d3061c9 2024-12-13T21:33:56,419 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/349ff9af19b94f6d8be6c22f7a7cec26 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/349ff9af19b94f6d8be6c22f7a7cec26 2024-12-13T21:33:56,419 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/462222e8b42148518ce6bbb51f70c571 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/C/462222e8b42148518ce6bbb51f70c571 2024-12-13T21:33:56,422 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/recovered.edits/268.seqid to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49/recovered.edits/268.seqid 2024-12-13T21:33:56,423 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/default/TestAcidGuarantees/c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,423 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-13T21:33:56,423 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-13T21:33:56,424 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-13T21:33:56,431 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213329f341397b0480197e6296678e26e8c_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213329f341397b0480197e6296678e26e8c_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,431 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121351eb39024bf748bf9c5e1117fa52bcb2_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121351eb39024bf748bf9c5e1117fa52bcb2_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,431 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213424df36543814ec7a58e128ef9a729d2_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213424df36543814ec7a58e128ef9a729d2_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,432 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121378e18f841eb143e3b243d947c5effb5b_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121378e18f841eb143e3b243d947c5effb5b_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,432 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b0fce5cec6fe479fb262b3c0b82012d2_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213b0fce5cec6fe479fb262b3c0b82012d2_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,432 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412138ea06c36e36e4fa595390e76a29c3707_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412138ea06c36e36e4fa595390e76a29c3707_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,432 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213c72cecd997b5494fb5183f7c55e53575_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213c72cecd997b5494fb5183f7c55e53575_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,432 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412135d6643a012e4423b926a2876bbf5ee92_c4e9541490a67306648a9c57a40aab49 to 
hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412135d6643a012e4423b926a2876bbf5ee92_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,433 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213c82f94c494904dbc92a530e8063127e6_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213c82f94c494904dbc92a530e8063127e6_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,433 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213f307ad89d0884cd6b7114b173ddc0372_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213f307ad89d0884cd6b7114b173ddc0372_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,433 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213db91fb8194a241f08acc0f229985d781_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213db91fb8194a241f08acc0f229985d781_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,433 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213f6cd70b65e714e1e8aaa46f20ed6a7c7_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213f6cd70b65e714e1e8aaa46f20ed6a7c7_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,433 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213facf9ca2e1874da6bb84572b3d75cdd8_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213facf9ca2e1874da6bb84572b3d75cdd8_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,433 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213ccc5df23660146f2ba44e28c37321509_c4e9541490a67306648a9c57a40aab49 to hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241213ccc5df23660146f2ba44e28c37321509_c4e9541490a67306648a9c57a40aab49 2024-12-13T21:33:56,434 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-13T21:33:56,437 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=183, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:56,438 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-13T21:33:56,440 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-13T21:33:56,441 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=183, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:56,441 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-13T21:33:56,441 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734125636441"}]},"ts":"9223372036854775807"} 2024-12-13T21:33:56,443 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-13T21:33:56,443 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => c4e9541490a67306648a9c57a40aab49, NAME => 'TestAcidGuarantees,,1734125602631.c4e9541490a67306648a9c57a40aab49.', STARTKEY => '', ENDKEY => ''}] 2024-12-13T21:33:56,443 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-13T21:33:56,443 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734125636443"}]},"ts":"9223372036854775807"} 2024-12-13T21:33:56,444 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-13T21:33:56,484 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=183, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-13T21:33:56,486 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 85 msec 2024-12-13T21:33:56,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33659 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-13T21:33:56,505 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-12-13T21:33:56,522 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=243 (was 243), OpenFileDescriptor=447 (was 447), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=409 (was 471), ProcessCount=9 (was 9), AvailableMemoryMB=4097 (was 4111) 2024-12-13T21:33:56,522 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-13T21:33:56,522 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-13T21:33:56,522 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a3c3fb3 to 127.0.0.1:57927 2024-12-13T21:33:56,522 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:56,522 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-13T21:33:56,522 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=479130374, stopped=false 2024-12-13T21:33:56,522 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=fd052dae32be,33659,1734125418153 2024-12-13T21:33:56,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T21:33:56,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T21:33:56,533 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-13T21:33:56,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:33:56,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:33:56,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:56,534 INFO [Time-limited test {}] 
regionserver.HRegionServer(2561): ***** STOPPING region server 'fd052dae32be,38989,1734125418878' ***** 2024-12-13T21:33:56,534 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-13T21:33:56,534 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T21:33:56,534 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T21:33:56,534 INFO [RS:0;fd052dae32be:38989 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-13T21:33:56,535 INFO [RS:0;fd052dae32be:38989 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-13T21:33:56,535 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-13T21:33:56,535 INFO [RS:0;fd052dae32be:38989 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-13T21:33:56,535 INFO [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(3579): Received CLOSE for 3a53a56d105c8fa6ce8789bb1b9d7a71 2024-12-13T21:33:56,536 INFO [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1224): stopping server fd052dae32be,38989,1734125418878 2024-12-13T21:33:56,536 DEBUG [RS:0;fd052dae32be:38989 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:56,536 INFO [RS:0;fd052dae32be:38989 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-13T21:33:56,536 INFO [RS:0;fd052dae32be:38989 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-13T21:33:56,536 INFO [RS:0;fd052dae32be:38989 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-13T21:33:56,536 INFO [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-13T21:33:56,536 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 3a53a56d105c8fa6ce8789bb1b9d7a71, disabling compactions & flushes 2024-12-13T21:33:56,536 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. 2024-12-13T21:33:56,536 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. 2024-12-13T21:33:56,536 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. after waiting 0 ms 2024-12-13T21:33:56,536 INFO [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-13T21:33:56,536 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. 
2024-12-13T21:33:56,536 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 3a53a56d105c8fa6ce8789bb1b9d7a71=hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71.} 2024-12-13T21:33:56,536 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 3a53a56d105c8fa6ce8789bb1b9d7a71 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-13T21:33:56,536 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-13T21:33:56,536 INFO [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-13T21:33:56,537 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-13T21:33:56,537 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-13T21:33:56,537 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-13T21:33:56,537 INFO [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-13T21:33:56,540 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3a53a56d105c8fa6ce8789bb1b9d7a71 2024-12-13T21:33:56,543 INFO [regionserver/fd052dae32be:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T21:33:56,559 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/namespace/3a53a56d105c8fa6ce8789bb1b9d7a71/.tmp/info/a971dbbebc9248c59c78db04a905429a is 45, key is default/info:d/1734125424975/Put/seqid=0 2024-12-13T21:33:56,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742470_1646 (size=5037) 2024-12-13T21:33:56,566 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/.tmp/info/54040c4780104d16948e0164426a7d6b is 143, key is hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71./info:regioninfo/1734125424875/Put/seqid=0 2024-12-13T21:33:56,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742471_1647 (size=7725) 2024-12-13T21:33:56,741 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3a53a56d105c8fa6ce8789bb1b9d7a71 2024-12-13T21:33:56,941 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3a53a56d105c8fa6ce8789bb1b9d7a71 2024-12-13T21:33:56,964 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/namespace/3a53a56d105c8fa6ce8789bb1b9d7a71/.tmp/info/a971dbbebc9248c59c78db04a905429a 2024-12-13T21:33:56,970 INFO [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/.tmp/info/54040c4780104d16948e0164426a7d6b 2024-12-13T21:33:56,974 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/namespace/3a53a56d105c8fa6ce8789bb1b9d7a71/.tmp/info/a971dbbebc9248c59c78db04a905429a as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/namespace/3a53a56d105c8fa6ce8789bb1b9d7a71/info/a971dbbebc9248c59c78db04a905429a 2024-12-13T21:33:56,977 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/namespace/3a53a56d105c8fa6ce8789bb1b9d7a71/info/a971dbbebc9248c59c78db04a905429a, entries=2, sequenceid=6, filesize=4.9 K 2024-12-13T21:33:56,978 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 3a53a56d105c8fa6ce8789bb1b9d7a71 in 442ms, sequenceid=6, compaction requested=false 2024-12-13T21:33:56,981 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/namespace/3a53a56d105c8fa6ce8789bb1b9d7a71/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T21:33:56,982 INFO [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. 2024-12-13T21:33:56,982 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 3a53a56d105c8fa6ce8789bb1b9d7a71: 2024-12-13T21:33:56,982 DEBUG [RS_CLOSE_REGION-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734125423586.3a53a56d105c8fa6ce8789bb1b9d7a71. 
2024-12-13T21:33:56,994 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/.tmp/rep_barrier/f133833a44df43ad970960dc8bbfea24 is 102, key is TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789./rep_barrier:/1734125453663/DeleteFamily/seqid=0 2024-12-13T21:33:56,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742472_1648 (size=6025) 2024-12-13T21:33:57,100 INFO [regionserver/fd052dae32be:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-13T21:33:57,100 INFO [regionserver/fd052dae32be:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-13T21:33:57,142 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-13T21:33:57,342 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-13T21:33:57,398 INFO [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/.tmp/rep_barrier/f133833a44df43ad970960dc8bbfea24 2024-12-13T21:33:57,419 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/.tmp/table/e094712c685d44c4974e8f6dc2804bc8 is 96, key is TestAcidGuarantees,,1734125425218.cc66b42faaed28a8693a712966f73789./table:/1734125453663/DeleteFamily/seqid=0 2024-12-13T21:33:57,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742473_1649 (size=5942) 2024-12-13T21:33:57,542 INFO [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-13T21:33:57,542 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-13T21:33:57,543 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-13T21:33:57,743 DEBUG [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-13T21:33:57,824 INFO [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/.tmp/table/e094712c685d44c4974e8f6dc2804bc8 2024-12-13T21:33:57,833 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/.tmp/info/54040c4780104d16948e0164426a7d6b as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/info/54040c4780104d16948e0164426a7d6b 2024-12-13T21:33:57,836 INFO [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/info/54040c4780104d16948e0164426a7d6b, entries=22, sequenceid=93, filesize=7.5 K 2024-12-13T21:33:57,837 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/.tmp/rep_barrier/f133833a44df43ad970960dc8bbfea24 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/rep_barrier/f133833a44df43ad970960dc8bbfea24 2024-12-13T21:33:57,840 INFO [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/rep_barrier/f133833a44df43ad970960dc8bbfea24, entries=6, sequenceid=93, filesize=5.9 K 2024-12-13T21:33:57,840 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/.tmp/table/e094712c685d44c4974e8f6dc2804bc8 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/table/e094712c685d44c4974e8f6dc2804bc8 2024-12-13T21:33:57,843 INFO [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/table/e094712c685d44c4974e8f6dc2804bc8, entries=9, sequenceid=93, filesize=5.8 K 2024-12-13T21:33:57,844 INFO [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1307ms, sequenceid=93, compaction requested=false 2024-12-13T21:33:57,848 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-13T21:33:57,849 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-13T21:33:57,849 INFO [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-13T21:33:57,849 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-13T21:33:57,849 DEBUG [RS_CLOSE_META-regionserver/fd052dae32be:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-13T21:33:57,943 INFO [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1250): stopping server fd052dae32be,38989,1734125418878; all regions closed. 
2024-12-13T21:33:57,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741834_1010 (size=26050) 2024-12-13T21:33:57,956 DEBUG [RS:0;fd052dae32be:38989 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/oldWALs 2024-12-13T21:33:57,956 INFO [RS:0;fd052dae32be:38989 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL fd052dae32be%2C38989%2C1734125418878.meta:.meta(num 1734125423248) 2024-12-13T21:33:57,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741832_1008 (size=14432150) 2024-12-13T21:33:57,961 DEBUG [RS:0;fd052dae32be:38989 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/oldWALs 2024-12-13T21:33:57,961 INFO [RS:0;fd052dae32be:38989 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL fd052dae32be%2C38989%2C1734125418878:(num 1734125422252) 2024-12-13T21:33:57,961 DEBUG [RS:0;fd052dae32be:38989 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:57,961 INFO [RS:0;fd052dae32be:38989 {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T21:33:57,962 INFO [RS:0;fd052dae32be:38989 {}] hbase.ChoreService(370): Chore service for: regionserver/fd052dae32be:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-13T21:33:57,962 INFO [regionserver/fd052dae32be:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-13T21:33:57,963 INFO [RS:0;fd052dae32be:38989 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.3:38989 2024-12-13T21:33:58,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fd052dae32be,38989,1734125418878 2024-12-13T21:33:58,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-13T21:33:58,016 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fd052dae32be,38989,1734125418878] 2024-12-13T21:33:58,017 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing fd052dae32be,38989,1734125418878; numProcessing=1 2024-12-13T21:33:58,025 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/fd052dae32be,38989,1734125418878 already deleted, retry=false 2024-12-13T21:33:58,025 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; fd052dae32be,38989,1734125418878 expired; onlineServers=0 2024-12-13T21:33:58,025 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'fd052dae32be,33659,1734125418153' ***** 2024-12-13T21:33:58,025 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-13T21:33:58,025 DEBUG [M:0;fd052dae32be:33659 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6132fdd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fd052dae32be/172.17.0.3:0 2024-12-13T21:33:58,026 INFO [M:0;fd052dae32be:33659 {}] regionserver.HRegionServer(1224): stopping server fd052dae32be,33659,1734125418153 2024-12-13T21:33:58,026 INFO [M:0;fd052dae32be:33659 {}] regionserver.HRegionServer(1250): stopping server fd052dae32be,33659,1734125418153; all regions closed. 2024-12-13T21:33:58,026 DEBUG [M:0;fd052dae32be:33659 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T21:33:58,026 DEBUG [M:0;fd052dae32be:33659 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-13T21:33:58,026 DEBUG [M:0;fd052dae32be:33659 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-13T21:33:58,026 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-13T21:33:58,026 DEBUG [master/fd052dae32be:0:becomeActiveMaster-HFileCleaner.small.0-1734125421941 {}] cleaner.HFileCleaner(306): Exit Thread[master/fd052dae32be:0:becomeActiveMaster-HFileCleaner.small.0-1734125421941,5,FailOnTimeoutGroup] 2024-12-13T21:33:58,026 DEBUG [master/fd052dae32be:0:becomeActiveMaster-HFileCleaner.large.0-1734125421937 {}] cleaner.HFileCleaner(306): Exit Thread[master/fd052dae32be:0:becomeActiveMaster-HFileCleaner.large.0-1734125421937,5,FailOnTimeoutGroup] 2024-12-13T21:33:58,027 INFO [M:0;fd052dae32be:33659 {}] hbase.ChoreService(370): Chore service for: master/fd052dae32be:0 had [] on shutdown 2024-12-13T21:33:58,027 DEBUG [M:0;fd052dae32be:33659 {}] master.HMaster(1733): Stopping service threads 2024-12-13T21:33:58,027 INFO [M:0;fd052dae32be:33659 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-13T21:33:58,029 INFO [M:0;fd052dae32be:33659 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-13T21:33:58,029 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-13T21:33:58,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-13T21:33:58,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T21:33:58,033 DEBUG [M:0;fd052dae32be:33659 {}] zookeeper.ZKUtil(347): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-13T21:33:58,033 WARN [M:0;fd052dae32be:33659 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-13T21:33:58,033 INFO [M:0;fd052dae32be:33659 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-13T21:33:58,033 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T21:33:58,033 INFO [M:0;fd052dae32be:33659 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-13T21:33:58,033 DEBUG [M:0;fd052dae32be:33659 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-13T21:33:58,034 INFO [M:0;fd052dae32be:33659 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T21:33:58,034 DEBUG [M:0;fd052dae32be:33659 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T21:33:58,034 DEBUG [M:0;fd052dae32be:33659 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-13T21:33:58,034 DEBUG [M:0;fd052dae32be:33659 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T21:33:58,034 INFO [M:0;fd052dae32be:33659 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=786.90 KB heapSize=969.06 KB 2024-12-13T21:33:58,051 DEBUG [M:0;fd052dae32be:33659 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/89a2d77105b84ec6b8608b1fb953644c is 82, key is hbase:meta,,1/info:regioninfo/1734125423369/Put/seqid=0 2024-12-13T21:33:58,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742474_1650 (size=5672) 2024-12-13T21:33:58,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T21:33:58,117 INFO [RS:0;fd052dae32be:38989 {}] regionserver.HRegionServer(1307): Exiting; stopping=fd052dae32be,38989,1734125418878; zookeeper connection closed. 
2024-12-13T21:33:58,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38989-0x100214d103e0001, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T21:33:58,117 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4a1c2581 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4a1c2581
2024-12-13T21:33:58,118 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-13T21:33:58,456 INFO [M:0;fd052dae32be:33659 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2247 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/89a2d77105b84ec6b8608b1fb953644c
2024-12-13T21:33:58,481 DEBUG [M:0;fd052dae32be:33659 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0275a935d4b4195a7235356f8bbceda is 2284, key is \x00\x00\x00\x00\x00\x00\x00\xA2/proc:d/1734125605749/Put/seqid=0
2024-12-13T21:33:58,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742475_1651 (size=45741)
2024-12-13T21:33:58,564 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-13T21:33:58,564 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-13T21:33:58,565 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-13T21:33:58,565 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees
2024-12-13T21:33:58,885 INFO [M:0;fd052dae32be:33659 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=786.34 KB at sequenceid=2247 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0275a935d4b4195a7235356f8bbceda
2024-12-13T21:33:58,887 INFO [M:0;fd052dae32be:33659 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b0275a935d4b4195a7235356f8bbceda
2024-12-13T21:33:58,899 DEBUG [M:0;fd052dae32be:33659 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cc352828d89148739a8fc121fe674b39 is 69, key is fd052dae32be,38989,1734125418878/rs:state/1734125422027/Put/seqid=0
2024-12-13T21:33:58,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073742476_1652 (size=5156)
2024-12-13T21:33:59,303 INFO [M:0;fd052dae32be:33659 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2247 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cc352828d89148739a8fc121fe674b39
2024-12-13T21:33:59,311 DEBUG [M:0;fd052dae32be:33659 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/89a2d77105b84ec6b8608b1fb953644c as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/89a2d77105b84ec6b8608b1fb953644c
2024-12-13T21:33:59,315 INFO [M:0;fd052dae32be:33659 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/89a2d77105b84ec6b8608b1fb953644c, entries=8, sequenceid=2247, filesize=5.5 K
2024-12-13T21:33:59,316 DEBUG [M:0;fd052dae32be:33659 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0275a935d4b4195a7235356f8bbceda as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b0275a935d4b4195a7235356f8bbceda
2024-12-13T21:33:59,319 INFO [M:0;fd052dae32be:33659 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b0275a935d4b4195a7235356f8bbceda
2024-12-13T21:33:59,319 INFO [M:0;fd052dae32be:33659 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b0275a935d4b4195a7235356f8bbceda, entries=183, sequenceid=2247, filesize=44.7 K
2024-12-13T21:33:59,319 DEBUG [M:0;fd052dae32be:33659 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cc352828d89148739a8fc121fe674b39 as hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cc352828d89148739a8fc121fe674b39
2024-12-13T21:33:59,322 INFO [M:0;fd052dae32be:33659 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/b87f2030-8753-9c1e-fddd-8594c2b4cd05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cc352828d89148739a8fc121fe674b39, entries=1, sequenceid=2247, filesize=5.0 K
2024-12-13T21:33:59,323 INFO [M:0;fd052dae32be:33659 {}] regionserver.HRegion(3040): Finished flush of dataSize ~786.90 KB/805784, heapSize ~968.77 KB/992016, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1289ms, sequenceid=2247, compaction requested=false
2024-12-13T21:33:59,324 INFO [M:0;fd052dae32be:33659 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T21:33:59,324 DEBUG [M:0;fd052dae32be:33659 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T21:33:59,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741830_1006 (size=953094)
2024-12-13T21:33:59,326 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-13T21:33:59,326 INFO [M:0;fd052dae32be:33659 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-13T21:33:59,327 INFO [M:0;fd052dae32be:33659 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.3:33659
2024-12-13T21:33:59,418 DEBUG [M:0;fd052dae32be:33659 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/fd052dae32be,33659,1734125418153 already deleted, retry=false
2024-12-13T21:33:59,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T21:33:59,534 INFO [M:0;fd052dae32be:33659 {}] regionserver.HRegionServer(1307): Exiting; stopping=fd052dae32be,33659,1734125418153; zookeeper connection closed.
2024-12-13T21:33:59,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33659-0x100214d103e0000, quorum=127.0.0.1:57927, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T21:33:59,544 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bd2e890{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T21:33:59,547 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d3fa6ef{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T21:33:59,547 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T21:33:59,547 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63d4d645{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T21:33:59,548 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57582772{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/hadoop.log.dir/,STOPPED}
2024-12-13T21:33:59,550 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T21:33:59,550 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T21:33:59,550 WARN [BP-1981329684-172.17.0.3-1734125415023 heartbeating to localhost/127.0.0.1:34065 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T21:33:59,550 WARN [BP-1981329684-172.17.0.3-1734125415023 heartbeating to localhost/127.0.0.1:34065 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1981329684-172.17.0.3-1734125415023 (Datanode Uuid 6c32d6a2-6df5-4930-9540-0263abbb0f59) service to localhost/127.0.0.1:34065
2024-12-13T21:33:59,553 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/cluster_6d61a109-3f65-6911-b45f-66524476d70f/dfs/data/data1/current/BP-1981329684-172.17.0.3-1734125415023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T21:33:59,553 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/cluster_6d61a109-3f65-6911-b45f-66524476d70f/dfs/data/data2/current/BP-1981329684-172.17.0.3-1734125415023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T21:33:59,554 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T21:33:59,560 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f0d4558{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-13T21:33:59,561 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a299586{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T21:33:59,561 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T21:33:59,561 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@588be694{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T21:33:59,561 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73882ca4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9b485bf9-7589-e7b0-7a92-7d8bf037cdd4/hadoop.log.dir/,STOPPED}
2024-12-13T21:33:59,577 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-13T21:33:59,693 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down